Index: head/sys/crypto/chacha20/chacha-sw.c =================================================================== --- head/sys/crypto/chacha20/chacha-sw.c (revision 367308) +++ head/sys/crypto/chacha20/chacha-sw.c (revision 367309) @@ -1,57 +1,57 @@ /* This file is in the public domain. */ #include __FBSDID("$FreeBSD$"); #include #include static int chacha20_xform_setkey(void *ctx, const uint8_t *key, int len) { if (len != CHACHA_MINKEYLEN && len != 32) return (EINVAL); chacha_keysetup(ctx, key, len * 8); return (0); } static void -chacha20_xform_reinit(void *ctx, const u_int8_t *iv) +chacha20_xform_reinit(void *ctx, const uint8_t *iv) { chacha_ivsetup(ctx, iv + 8, iv); } static void chacha20_xform_crypt(void *ctx, const uint8_t *in, uint8_t *out) { chacha_encrypt_bytes(ctx, in, out, CHACHA_BLOCKLEN); } static void chacha20_xform_crypt_last(void *ctx, const uint8_t *in, uint8_t *out, size_t len) { chacha_encrypt_bytes(ctx, in, out, len); } struct enc_xform enc_xform_chacha20 = { .type = CRYPTO_CHACHA20, .name = "chacha20", .ctxsize = sizeof(struct chacha_ctx), .blocksize = 1, .native_blocksize = CHACHA_BLOCKLEN, .ivsize = CHACHA_NONCELEN + CHACHA_CTRLEN, .minkey = CHACHA_MINKEYLEN, .maxkey = 32, .encrypt = chacha20_xform_crypt, .decrypt = chacha20_xform_crypt, .setkey = chacha20_xform_setkey, .reinit = chacha20_xform_reinit, .encrypt_last = chacha20_xform_crypt_last, .decrypt_last = chacha20_xform_crypt_last, }; Index: head/sys/crypto/des/des.h =================================================================== --- head/sys/crypto/des/des.h (revision 367308) +++ head/sys/crypto/des/des.h (revision 367309) @@ -1,110 +1,110 @@ /* $FreeBSD$ */ /* $KAME: des.h,v 1.8 2001/09/10 04:03:57 itojun Exp $ */ /* lib/des/des.h */ /* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au) * All rights reserved. * * This file is part of an SSL implementation written * by Eric Young (eay@mincom.oz.au). * The implementation was written so as to conform with Netscapes SSL * specification. This library and applications are * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE * as long as the following conditions are aheared to. * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. If this code is used in a product, * Eric Young should be given attribution as the author of the parts used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Eric Young (eay@mincom.oz.au) * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] */ #ifndef HEADER_DES_H #define HEADER_DES_H #ifdef __cplusplus extern "C" { #endif /* must be 32bit quantity */ -#define DES_LONG u_int32_t +#define DES_LONG uint32_t typedef unsigned char des_cblock[8]; typedef struct des_ks_struct { union { des_cblock cblock; /* make sure things are correct size on machines with * 8 byte longs */ DES_LONG deslong[2]; } ks; int weak_key; } des_key_schedule[16]; #define DES_KEY_SZ (sizeof(des_cblock)) #define DES_SCHEDULE_SZ (sizeof(des_key_schedule)) #define DES_ENCRYPT 1 #define DES_DECRYPT 0 #define DES_CBC_MODE 0 #define DES_PCBC_MODE 1 extern int des_check_key; /* defaults to false */ char *des_options(void); void des_ecb_encrypt(unsigned char *, unsigned char *, des_key_schedule, int); void des_encrypt1(DES_LONG *, des_key_schedule, int); void des_encrypt2(DES_LONG *, des_key_schedule, int); void des_encrypt3(DES_LONG *, des_key_schedule, des_key_schedule, des_key_schedule); void des_decrypt3(DES_LONG *, des_key_schedule, des_key_schedule, des_key_schedule); void des_ecb3_encrypt(unsigned char *, unsigned char *, des_key_schedule, des_key_schedule, des_key_schedule, int); void des_set_odd_parity(unsigned char *); void des_fixup_key_parity(unsigned char *); int des_is_weak_key(const unsigned char *); int des_set_key(const unsigned char *, des_key_schedule); int des_key_sched(const unsigned char *, des_key_schedule); int des_set_key_checked(const unsigned char *, des_key_schedule); void des_set_key_unchecked(const unsigned char *, des_key_schedule); int des_check_key_parity(const unsigned char *); #ifdef __cplusplus } #endif #endif Index: head/sys/crypto/rijndael/rijndael-api-fst.c =================================================================== --- head/sys/crypto/rijndael/rijndael-api-fst.c (revision 367308) +++ head/sys/crypto/rijndael/rijndael-api-fst.c (revision 367309) @@ -1,451 +1,451 @@ /* $KAME: rijndael-api-fst.c,v 1.10 2001/05/27 09:34:18 itojun Exp $ */ /* * rijndael-api-fst.c v2.3 April '2000 * * Optimised ANSI C code * * authors: v1.0: Antoon Bosselaers * v2.0: Vincent Rijmen * v2.1: Vincent Rijmen * v2.2: Vincent Rijmen * v2.3: Paulo Barreto * v2.4: Vincent Rijmen * * This code is placed in the public domain. 
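 *
 * Illustrative usage sketch (hypothetical caller, not part of this
 * change); note that the keyLen and inputLen arguments of this API are
 * expressed in bits, so one 16-byte ECB block is passed as 128:
 *
 *	keyInstance ki;
 *	cipherInstance ci;
 *	uint8_t pt[16], ct[16];
 *
 *	rijndael_makeKey(&ki, DIR_ENCRYPT, 128, keymat); // keymat: 16 bytes, hypothetical
 *	rijndael_cipherInit(&ci, MODE_ECB, NULL);        // NULL IV means zero IV
 *	rijndael_blockEncrypt(&ci, &ki, pt, 128, ct);    // returns 128 (bits)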
*/ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include #else #include #endif #include #include #ifndef TRUE #define TRUE 1 #endif -typedef u_int8_t BYTE; +typedef uint8_t BYTE; int rijndael_makeKey(keyInstance *key, BYTE direction, int keyLen, const char *keyMaterial) { if (key == NULL) { return BAD_KEY_INSTANCE; } if ((direction == DIR_ENCRYPT) || (direction == DIR_DECRYPT)) { key->direction = direction; } else { return BAD_KEY_DIR; } if ((keyLen == 128) || (keyLen == 192) || (keyLen == 256)) { key->keyLen = keyLen; } else { return BAD_KEY_MAT; } if (keyMaterial != NULL) { memcpy(key->keyMaterial, keyMaterial, keyLen/8); } /* initialize key schedule: */ if (direction == DIR_ENCRYPT) { key->Nr = rijndaelKeySetupEnc(key->rk, key->keyMaterial, keyLen); } else { key->Nr = rijndaelKeySetupDec(key->rk, key->keyMaterial, keyLen); } rijndaelKeySetupEnc(key->ek, key->keyMaterial, keyLen); return TRUE; } int rijndael_cipherInit(cipherInstance *cipher, BYTE mode, char *IV) { if ((mode == MODE_ECB) || (mode == MODE_CBC) || (mode == MODE_CFB1)) { cipher->mode = mode; } else { return BAD_CIPHER_MODE; } if (IV != NULL) { memcpy(cipher->IV, IV, RIJNDAEL_MAX_IV_SIZE); } else { memset(cipher->IV, 0, RIJNDAEL_MAX_IV_SIZE); } return TRUE; } int rijndael_blockEncrypt(cipherInstance *cipher, keyInstance *key, const BYTE *input, int inputLen, BYTE *outBuffer) { int i, k, numBlocks; - u_int8_t block[16], iv[4][4]; + uint8_t block[16], iv[4][4]; if (cipher == NULL || key == NULL || key->direction == DIR_DECRYPT) { return BAD_CIPHER_STATE; } if (input == NULL || inputLen <= 0) { return 0; /* nothing to do */ } numBlocks = inputLen/128; switch (cipher->mode) { case MODE_ECB: for (i = numBlocks; i > 0; i--) { rijndaelEncrypt(key->rk, key->Nr, input, outBuffer); input += 16; outBuffer += 16; } break; case MODE_CBC: #if 1 /*STRICT_ALIGN*/ memcpy(block, cipher->IV, 16); memcpy(iv, input, 16); - ((u_int32_t*)block)[0] ^= ((u_int32_t*)iv)[0]; - ((u_int32_t*)block)[1] ^= ((u_int32_t*)iv)[1]; - ((u_int32_t*)block)[2] ^= ((u_int32_t*)iv)[2]; - ((u_int32_t*)block)[3] ^= ((u_int32_t*)iv)[3]; + ((uint32_t*)block)[0] ^= ((uint32_t*)iv)[0]; + ((uint32_t*)block)[1] ^= ((uint32_t*)iv)[1]; + ((uint32_t*)block)[2] ^= ((uint32_t*)iv)[2]; + ((uint32_t*)block)[3] ^= ((uint32_t*)iv)[3]; #else - ((u_int32_t*)block)[0] = ((u_int32_t*)cipher->IV)[0] ^ ((u_int32_t*)input)[0]; - ((u_int32_t*)block)[1] = ((u_int32_t*)cipher->IV)[1] ^ ((u_int32_t*)input)[1]; - ((u_int32_t*)block)[2] = ((u_int32_t*)cipher->IV)[2] ^ ((u_int32_t*)input)[2]; - ((u_int32_t*)block)[3] = ((u_int32_t*)cipher->IV)[3] ^ ((u_int32_t*)input)[3]; + ((uint32_t*)block)[0] = ((uint32_t*)cipher->IV)[0] ^ ((uint32_t*)input)[0]; + ((uint32_t*)block)[1] = ((uint32_t*)cipher->IV)[1] ^ ((uint32_t*)input)[1]; + ((uint32_t*)block)[2] = ((uint32_t*)cipher->IV)[2] ^ ((uint32_t*)input)[2]; + ((uint32_t*)block)[3] = ((uint32_t*)cipher->IV)[3] ^ ((uint32_t*)input)[3]; #endif rijndaelEncrypt(key->rk, key->Nr, block, outBuffer); input += 16; for (i = numBlocks - 1; i > 0; i--) { #if 1 /*STRICT_ALIGN*/ memcpy(block, outBuffer, 16); memcpy(iv, input, 16); - ((u_int32_t*)block)[0] ^= ((u_int32_t*)iv)[0]; - ((u_int32_t*)block)[1] ^= ((u_int32_t*)iv)[1]; - ((u_int32_t*)block)[2] ^= ((u_int32_t*)iv)[2]; - ((u_int32_t*)block)[3] ^= ((u_int32_t*)iv)[3]; + ((uint32_t*)block)[0] ^= ((uint32_t*)iv)[0]; + ((uint32_t*)block)[1] ^= ((uint32_t*)iv)[1]; + ((uint32_t*)block)[2] ^= ((uint32_t*)iv)[2]; + ((uint32_t*)block)[3] ^= ((uint32_t*)iv)[3]; #else - ((u_int32_t*)block)[0] = 
((u_int32_t*)outBuffer)[0] ^ ((u_int32_t*)input)[0]; - ((u_int32_t*)block)[1] = ((u_int32_t*)outBuffer)[1] ^ ((u_int32_t*)input)[1]; - ((u_int32_t*)block)[2] = ((u_int32_t*)outBuffer)[2] ^ ((u_int32_t*)input)[2]; - ((u_int32_t*)block)[3] = ((u_int32_t*)outBuffer)[3] ^ ((u_int32_t*)input)[3]; + ((uint32_t*)block)[0] = ((uint32_t*)outBuffer)[0] ^ ((uint32_t*)input)[0]; + ((uint32_t*)block)[1] = ((uint32_t*)outBuffer)[1] ^ ((uint32_t*)input)[1]; + ((uint32_t*)block)[2] = ((uint32_t*)outBuffer)[2] ^ ((uint32_t*)input)[2]; + ((uint32_t*)block)[3] = ((uint32_t*)outBuffer)[3] ^ ((uint32_t*)input)[3]; #endif outBuffer += 16; rijndaelEncrypt(key->rk, key->Nr, block, outBuffer); input += 16; } break; case MODE_CFB1: #if 1 /*STRICT_ALIGN*/ memcpy(iv, cipher->IV, 16); #else /* !STRICT_ALIGN */ - *((u_int32_t*)iv[0]) = *((u_int32_t*)(cipher->IV )); - *((u_int32_t*)iv[1]) = *((u_int32_t*)(cipher->IV+ 4)); - *((u_int32_t*)iv[2]) = *((u_int32_t*)(cipher->IV+ 8)); - *((u_int32_t*)iv[3]) = *((u_int32_t*)(cipher->IV+12)); + *((uint32_t*)iv[0]) = *((uint32_t*)(cipher->IV )); + *((uint32_t*)iv[1]) = *((uint32_t*)(cipher->IV+ 4)); + *((uint32_t*)iv[2]) = *((uint32_t*)(cipher->IV+ 8)); + *((uint32_t*)iv[3]) = *((uint32_t*)(cipher->IV+12)); #endif /* ?STRICT_ALIGN */ for (i = numBlocks; i > 0; i--) { for (k = 0; k < 128; k++) { - *((u_int32_t*) block ) = *((u_int32_t*)iv[0]); - *((u_int32_t*)(block+ 4)) = *((u_int32_t*)iv[1]); - *((u_int32_t*)(block+ 8)) = *((u_int32_t*)iv[2]); - *((u_int32_t*)(block+12)) = *((u_int32_t*)iv[3]); + *((uint32_t*) block ) = *((uint32_t*)iv[0]); + *((uint32_t*)(block+ 4)) = *((uint32_t*)iv[1]); + *((uint32_t*)(block+ 8)) = *((uint32_t*)iv[2]); + *((uint32_t*)(block+12)) = *((uint32_t*)iv[3]); rijndaelEncrypt(key->ek, key->Nr, block, block); outBuffer[k/8] ^= (block[0] & 0x80) >> (k & 7); iv[0][0] = (iv[0][0] << 1) | (iv[0][1] >> 7); iv[0][1] = (iv[0][1] << 1) | (iv[0][2] >> 7); iv[0][2] = (iv[0][2] << 1) | (iv[0][3] >> 7); iv[0][3] = (iv[0][3] << 1) | (iv[1][0] >> 7); iv[1][0] = (iv[1][0] << 1) | (iv[1][1] >> 7); iv[1][1] = (iv[1][1] << 1) | (iv[1][2] >> 7); iv[1][2] = (iv[1][2] << 1) | (iv[1][3] >> 7); iv[1][3] = (iv[1][3] << 1) | (iv[2][0] >> 7); iv[2][0] = (iv[2][0] << 1) | (iv[2][1] >> 7); iv[2][1] = (iv[2][1] << 1) | (iv[2][2] >> 7); iv[2][2] = (iv[2][2] << 1) | (iv[2][3] >> 7); iv[2][3] = (iv[2][3] << 1) | (iv[3][0] >> 7); iv[3][0] = (iv[3][0] << 1) | (iv[3][1] >> 7); iv[3][1] = (iv[3][1] << 1) | (iv[3][2] >> 7); iv[3][2] = (iv[3][2] << 1) | (iv[3][3] >> 7); iv[3][3] = (iv[3][3] << 1) | ((outBuffer[k/8] >> (7-(k&7))) & 1); } } break; default: return BAD_CIPHER_STATE; } explicit_bzero(block, sizeof(block)); return 128*numBlocks; } /** * Encrypt data partitioned in octets, using RFC 2040-like padding. * * @param input data to be encrypted (octet sequence) * @param inputOctets input length in octets (not bits) * @param outBuffer encrypted output data * * @return length in octets (not bits) of the encrypted output buffer. 
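 *
 * Worked example (restating the padding rule below, not new behavior):
 * inputOctets = 20 gives numBlocks = 1 and padLen = 16 - (20 - 16) = 12,
 * so the function returns 32; a block-aligned input gets a full 16-octet
 * padding block, so the output is always a whole number of blocks and
 * strictly longer than the input.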
*/ int rijndael_padEncrypt(cipherInstance *cipher, keyInstance *key, const BYTE *input, int inputOctets, BYTE *outBuffer) { int i, numBlocks, padLen; - u_int8_t block[16], *iv, *cp; + uint8_t block[16], *iv, *cp; if (cipher == NULL || key == NULL || key->direction == DIR_DECRYPT) { return BAD_CIPHER_STATE; } if (input == NULL || inputOctets <= 0) { return 0; /* nothing to do */ } numBlocks = inputOctets/16; switch (cipher->mode) { case MODE_ECB: for (i = numBlocks; i > 0; i--) { rijndaelEncrypt(key->rk, key->Nr, input, outBuffer); input += 16; outBuffer += 16; } padLen = 16 - (inputOctets - 16*numBlocks); if (padLen <= 0 || padLen > 16) return BAD_CIPHER_STATE; memcpy(block, input, 16 - padLen); for (cp = block + 16 - padLen; cp < block + 16; cp++) *cp = padLen; rijndaelEncrypt(key->rk, key->Nr, block, outBuffer); break; case MODE_CBC: iv = cipher->IV; for (i = numBlocks; i > 0; i--) { - ((u_int32_t*)block)[0] = ((const u_int32_t*)input)[0] ^ ((u_int32_t*)iv)[0]; - ((u_int32_t*)block)[1] = ((const u_int32_t*)input)[1] ^ ((u_int32_t*)iv)[1]; - ((u_int32_t*)block)[2] = ((const u_int32_t*)input)[2] ^ ((u_int32_t*)iv)[2]; - ((u_int32_t*)block)[3] = ((const u_int32_t*)input)[3] ^ ((u_int32_t*)iv)[3]; + ((uint32_t*)block)[0] = ((const uint32_t*)input)[0] ^ ((uint32_t*)iv)[0]; + ((uint32_t*)block)[1] = ((const uint32_t*)input)[1] ^ ((uint32_t*)iv)[1]; + ((uint32_t*)block)[2] = ((const uint32_t*)input)[2] ^ ((uint32_t*)iv)[2]; + ((uint32_t*)block)[3] = ((const uint32_t*)input)[3] ^ ((uint32_t*)iv)[3]; rijndaelEncrypt(key->rk, key->Nr, block, outBuffer); iv = outBuffer; input += 16; outBuffer += 16; } padLen = 16 - (inputOctets - 16*numBlocks); if (padLen <= 0 || padLen > 16) return BAD_CIPHER_STATE; for (i = 0; i < 16 - padLen; i++) { block[i] = input[i] ^ iv[i]; } for (i = 16 - padLen; i < 16; i++) { block[i] = (BYTE)padLen ^ iv[i]; } rijndaelEncrypt(key->rk, key->Nr, block, outBuffer); break; default: return BAD_CIPHER_STATE; } explicit_bzero(block, sizeof(block)); return 16*(numBlocks + 1); } int rijndael_blockDecrypt(cipherInstance *cipher, keyInstance *key, const BYTE *input, int inputLen, BYTE *outBuffer) { int i, k, numBlocks; - u_int8_t block[16], iv[4][4]; + uint8_t block[16], iv[4][4]; if (cipher == NULL || key == NULL || (cipher->mode != MODE_CFB1 && key->direction == DIR_ENCRYPT)) { return BAD_CIPHER_STATE; } if (input == NULL || inputLen <= 0) { return 0; /* nothing to do */ } numBlocks = inputLen/128; switch (cipher->mode) { case MODE_ECB: for (i = numBlocks; i > 0; i--) { rijndaelDecrypt(key->rk, key->Nr, input, outBuffer); input += 16; outBuffer += 16; } break; case MODE_CBC: #if 1 /*STRICT_ALIGN */ memcpy(iv, cipher->IV, 16); #else - *((u_int32_t*)iv[0]) = *((u_int32_t*)(cipher->IV )); - *((u_int32_t*)iv[1]) = *((u_int32_t*)(cipher->IV+ 4)); - *((u_int32_t*)iv[2]) = *((u_int32_t*)(cipher->IV+ 8)); - *((u_int32_t*)iv[3]) = *((u_int32_t*)(cipher->IV+12)); + *((uint32_t*)iv[0]) = *((uint32_t*)(cipher->IV )); + *((uint32_t*)iv[1]) = *((uint32_t*)(cipher->IV+ 4)); + *((uint32_t*)iv[2]) = *((uint32_t*)(cipher->IV+ 8)); + *((uint32_t*)iv[3]) = *((uint32_t*)(cipher->IV+12)); #endif for (i = numBlocks; i > 0; i--) { rijndaelDecrypt(key->rk, key->Nr, input, block); - ((u_int32_t*)block)[0] ^= *((u_int32_t*)iv[0]); - ((u_int32_t*)block)[1] ^= *((u_int32_t*)iv[1]); - ((u_int32_t*)block)[2] ^= *((u_int32_t*)iv[2]); - ((u_int32_t*)block)[3] ^= *((u_int32_t*)iv[3]); + ((uint32_t*)block)[0] ^= *((uint32_t*)iv[0]); + ((uint32_t*)block)[1] ^= *((uint32_t*)iv[1]); + ((uint32_t*)block)[2] ^= 
*((uint32_t*)iv[2]); + ((uint32_t*)block)[3] ^= *((uint32_t*)iv[3]); #if 1 /*STRICT_ALIGN*/ memcpy(iv, input, 16); memcpy(outBuffer, block, 16); #else - *((u_int32_t*)iv[0]) = ((u_int32_t*)input)[0]; ((u_int32_t*)outBuffer)[0] = ((u_int32_t*)block)[0]; - *((u_int32_t*)iv[1]) = ((u_int32_t*)input)[1]; ((u_int32_t*)outBuffer)[1] = ((u_int32_t*)block)[1]; - *((u_int32_t*)iv[2]) = ((u_int32_t*)input)[2]; ((u_int32_t*)outBuffer)[2] = ((u_int32_t*)block)[2]; - *((u_int32_t*)iv[3]) = ((u_int32_t*)input)[3]; ((u_int32_t*)outBuffer)[3] = ((u_int32_t*)block)[3]; + *((uint32_t*)iv[0]) = ((uint32_t*)input)[0]; ((uint32_t*)outBuffer)[0] = ((uint32_t*)block)[0]; + *((uint32_t*)iv[1]) = ((uint32_t*)input)[1]; ((uint32_t*)outBuffer)[1] = ((uint32_t*)block)[1]; + *((uint32_t*)iv[2]) = ((uint32_t*)input)[2]; ((uint32_t*)outBuffer)[2] = ((uint32_t*)block)[2]; + *((uint32_t*)iv[3]) = ((uint32_t*)input)[3]; ((uint32_t*)outBuffer)[3] = ((uint32_t*)block)[3]; #endif input += 16; outBuffer += 16; } break; case MODE_CFB1: #if 1 /*STRICT_ALIGN */ memcpy(iv, cipher->IV, 16); #else - *((u_int32_t*)iv[0]) = *((u_int32_t*)(cipher->IV)); - *((u_int32_t*)iv[1]) = *((u_int32_t*)(cipher->IV+ 4)); - *((u_int32_t*)iv[2]) = *((u_int32_t*)(cipher->IV+ 8)); - *((u_int32_t*)iv[3]) = *((u_int32_t*)(cipher->IV+12)); + *((uint32_t*)iv[0]) = *((uint32_t*)(cipher->IV)); + *((uint32_t*)iv[1]) = *((uint32_t*)(cipher->IV+ 4)); + *((uint32_t*)iv[2]) = *((uint32_t*)(cipher->IV+ 8)); + *((uint32_t*)iv[3]) = *((uint32_t*)(cipher->IV+12)); #endif for (i = numBlocks; i > 0; i--) { for (k = 0; k < 128; k++) { - *((u_int32_t*) block ) = *((u_int32_t*)iv[0]); - *((u_int32_t*)(block+ 4)) = *((u_int32_t*)iv[1]); - *((u_int32_t*)(block+ 8)) = *((u_int32_t*)iv[2]); - *((u_int32_t*)(block+12)) = *((u_int32_t*)iv[3]); + *((uint32_t*) block ) = *((uint32_t*)iv[0]); + *((uint32_t*)(block+ 4)) = *((uint32_t*)iv[1]); + *((uint32_t*)(block+ 8)) = *((uint32_t*)iv[2]); + *((uint32_t*)(block+12)) = *((uint32_t*)iv[3]); rijndaelEncrypt(key->ek, key->Nr, block, block); iv[0][0] = (iv[0][0] << 1) | (iv[0][1] >> 7); iv[0][1] = (iv[0][1] << 1) | (iv[0][2] >> 7); iv[0][2] = (iv[0][2] << 1) | (iv[0][3] >> 7); iv[0][3] = (iv[0][3] << 1) | (iv[1][0] >> 7); iv[1][0] = (iv[1][0] << 1) | (iv[1][1] >> 7); iv[1][1] = (iv[1][1] << 1) | (iv[1][2] >> 7); iv[1][2] = (iv[1][2] << 1) | (iv[1][3] >> 7); iv[1][3] = (iv[1][3] << 1) | (iv[2][0] >> 7); iv[2][0] = (iv[2][0] << 1) | (iv[2][1] >> 7); iv[2][1] = (iv[2][1] << 1) | (iv[2][2] >> 7); iv[2][2] = (iv[2][2] << 1) | (iv[2][3] >> 7); iv[2][3] = (iv[2][3] << 1) | (iv[3][0] >> 7); iv[3][0] = (iv[3][0] << 1) | (iv[3][1] >> 7); iv[3][1] = (iv[3][1] << 1) | (iv[3][2] >> 7); iv[3][2] = (iv[3][2] << 1) | (iv[3][3] >> 7); iv[3][3] = (iv[3][3] << 1) | ((input[k/8] >> (7-(k&7))) & 1); outBuffer[k/8] ^= (block[0] & 0x80) >> (k & 7); } } break; default: return BAD_CIPHER_STATE; } explicit_bzero(block, sizeof(block)); return 128*numBlocks; } int rijndael_padDecrypt(cipherInstance *cipher, keyInstance *key, const BYTE *input, int inputOctets, BYTE *outBuffer) { int i, numBlocks, padLen, rval; - u_int8_t block[16]; - u_int32_t iv[4]; + uint8_t block[16]; + uint32_t iv[4]; if (cipher == NULL || key == NULL || key->direction == DIR_ENCRYPT) { return BAD_CIPHER_STATE; } if (input == NULL || inputOctets <= 0) { return 0; /* nothing to do */ } if (inputOctets % 16 != 0) { return BAD_DATA; } numBlocks = inputOctets/16; switch (cipher->mode) { case MODE_ECB: /* all blocks but last */ for (i = numBlocks - 1; i > 0; i--) { rijndaelDecrypt(key->rk, 
key->Nr, input, outBuffer); input += 16; outBuffer += 16; } /* last block */ rijndaelDecrypt(key->rk, key->Nr, input, block); padLen = block[15]; if (padLen >= 16) { rval = BAD_DATA; goto out; } for (i = 16 - padLen; i < 16; i++) { if (block[i] != padLen) { rval = BAD_DATA; goto out; } } memcpy(outBuffer, block, 16 - padLen); break; case MODE_CBC: memcpy(iv, cipher->IV, 16); /* all blocks but last */ for (i = numBlocks - 1; i > 0; i--) { rijndaelDecrypt(key->rk, key->Nr, input, block); - ((u_int32_t*)block)[0] ^= iv[0]; - ((u_int32_t*)block)[1] ^= iv[1]; - ((u_int32_t*)block)[2] ^= iv[2]; - ((u_int32_t*)block)[3] ^= iv[3]; + ((uint32_t*)block)[0] ^= iv[0]; + ((uint32_t*)block)[1] ^= iv[1]; + ((uint32_t*)block)[2] ^= iv[2]; + ((uint32_t*)block)[3] ^= iv[3]; memcpy(iv, input, 16); memcpy(outBuffer, block, 16); input += 16; outBuffer += 16; } /* last block */ rijndaelDecrypt(key->rk, key->Nr, input, block); - ((u_int32_t*)block)[0] ^= iv[0]; - ((u_int32_t*)block)[1] ^= iv[1]; - ((u_int32_t*)block)[2] ^= iv[2]; - ((u_int32_t*)block)[3] ^= iv[3]; + ((uint32_t*)block)[0] ^= iv[0]; + ((uint32_t*)block)[1] ^= iv[1]; + ((uint32_t*)block)[2] ^= iv[2]; + ((uint32_t*)block)[3] ^= iv[3]; padLen = block[15]; if (padLen <= 0 || padLen > 16) { rval = BAD_DATA; goto out; } for (i = 16 - padLen; i < 16; i++) { if (block[i] != padLen) { rval = BAD_DATA; goto out; } } memcpy(outBuffer, block, 16 - padLen); break; default: return BAD_CIPHER_STATE; } rval = 16*numBlocks - padLen; out: explicit_bzero(block, sizeof(block)); return rval; } Index: head/sys/crypto/rijndael/rijndael.h =================================================================== --- head/sys/crypto/rijndael/rijndael.h (revision 367308) +++ head/sys/crypto/rijndael/rijndael.h (revision 367309) @@ -1,55 +1,55 @@ /* $KAME: rijndael.h,v 1.6 2003/08/28 08:36:32 itojun Exp $ */ /* $FreeBSD$ */ /** * rijndael-alg-fst.h * * @version 3.0 (December 2000) * * Optimised ANSI C code for the Rijndael cipher (now AES) * * @author Vincent Rijmen * @author Antoon Bosselaers * @author Paulo Barreto * * This code is hereby placed in the public domain. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef __RIJNDAEL_H #define __RIJNDAEL_H #define RIJNDAEL_MAXKC (256/32) #define RIJNDAEL_MAXKB (256/8) #define RIJNDAEL_MAXNR 14 typedef struct { int decrypt; int Nr; /* key-length-dependent number of rounds */ uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)]; /* encrypt key schedule */ uint32_t dk[4 * (RIJNDAEL_MAXNR + 1)]; /* decrypt key schedule */ } rijndael_ctx; void rijndael_set_key(rijndael_ctx *, const u_char *, int); void rijndael_decrypt(const rijndael_ctx *, const u_char *, u_char *); void rijndael_encrypt(const rijndael_ctx *, const u_char *, u_char *); -int rijndaelKeySetupEnc(u_int32_t [/*4*(Nr+1)*/], const u_int8_t [], int); -int rijndaelKeySetupDec(u_int32_t [/*4*(Nr+1)*/], const u_int8_t [], int); -void rijndaelEncrypt(const u_int32_t [/*4*(Nr+1)*/], int, - const u_int8_t[16], u_int8_t [16]); -void rijndaelDecrypt(const u_int32_t [/*4*(Nr+1)*/], int, - const u_int8_t [16], u_int8_t [16]); +int rijndaelKeySetupEnc(uint32_t [/*4*(Nr+1)*/], const uint8_t [], int); +int rijndaelKeySetupDec(uint32_t [/*4*(Nr+1)*/], const uint8_t [], int); +void rijndaelEncrypt(const uint32_t [/*4*(Nr+1)*/], int, + const uint8_t[16], uint8_t [16]); +void rijndaelDecrypt(const uint32_t [/*4*(Nr+1)*/], int, + const uint8_t [16], uint8_t [16]); #endif /* __RIJNDAEL_H */ Index: head/sys/crypto/rijndael/rijndael_local.h =================================================================== --- head/sys/crypto/rijndael/rijndael_local.h (revision 367308) +++ head/sys/crypto/rijndael/rijndael_local.h (revision 367309) @@ -1,7 +1,7 @@ /* $KAME: rijndael_local.h,v 1.5 2003/08/28 08:37:24 itojun Exp $ */ /* $FreeBSD$ */ /* the file should not be used from outside */ -typedef u_int8_t u8; -typedef u_int16_t u16; -typedef u_int32_t u32; +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; Index: head/sys/crypto/sha1.c =================================================================== --- head/sys/crypto/sha1.c (revision 367308) +++ head/sys/crypto/sha1.c (revision 367309) @@ -1,276 +1,276 @@ /* $KAME: sha1.c,v 1.5 2000/11/08 06:13:08 itojun Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * FIPS pub 180-1: Secure Hash Algorithm (SHA-1) * based on: http://csrc.nist.gov/fips/fip180-1.txt * implemented by Jun-ichiro itojun Itoh */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include /* sanity check */ #if BYTE_ORDER != BIG_ENDIAN # if BYTE_ORDER != LITTLE_ENDIAN # define unsupported 1 # endif #endif #ifndef unsupported /* constant table */ -static u_int32_t _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 }; +static uint32_t _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 }; #define K(t) _K[(t) / 20] #define F0(b, c, d) (((b) & (c)) | ((~(b)) & (d))) #define F1(b, c, d) (((b) ^ (c)) ^ (d)) #define F2(b, c, d) (((b) & (c)) | ((b) & (d)) | ((c) & (d))) #define F3(b, c, d) (((b) ^ (c)) ^ (d)) #define S(n, x) (((x) << (n)) | ((x) >> (32 - n))) #define H(n) (ctxt->h.b32[(n)]) #define COUNT (ctxt->count) #define BCOUNT (ctxt->c.b64[0] / 8) #define W(n) (ctxt->m.b32[(n)]) #define PUTBYTE(x) { \ ctxt->m.b8[(COUNT % 64)] = (x); \ COUNT++; \ COUNT %= 64; \ ctxt->c.b64[0] += 8; \ if (COUNT % 64 == 0) \ sha1_step(ctxt); \ } #define PUTPAD(x) { \ ctxt->m.b8[(COUNT % 64)] = (x); \ COUNT++; \ COUNT %= 64; \ if (COUNT % 64 == 0) \ sha1_step(ctxt); \ } static void sha1_step(struct sha1_ctxt *); static void sha1_step(ctxt) struct sha1_ctxt *ctxt; { - u_int32_t a, b, c, d, e; + uint32_t a, b, c, d, e; size_t t, s; - u_int32_t tmp; + uint32_t tmp; #if BYTE_ORDER == LITTLE_ENDIAN struct sha1_ctxt tctxt; bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64); ctxt->m.b8[0] = tctxt.m.b8[3]; ctxt->m.b8[1] = tctxt.m.b8[2]; ctxt->m.b8[2] = tctxt.m.b8[1]; ctxt->m.b8[3] = tctxt.m.b8[0]; ctxt->m.b8[4] = tctxt.m.b8[7]; ctxt->m.b8[5] = tctxt.m.b8[6]; ctxt->m.b8[6] = tctxt.m.b8[5]; ctxt->m.b8[7] = tctxt.m.b8[4]; ctxt->m.b8[8] = tctxt.m.b8[11]; ctxt->m.b8[9] = tctxt.m.b8[10]; ctxt->m.b8[10] = tctxt.m.b8[9]; ctxt->m.b8[11] = tctxt.m.b8[8]; ctxt->m.b8[12] = tctxt.m.b8[15]; ctxt->m.b8[13] = tctxt.m.b8[14]; ctxt->m.b8[14] = tctxt.m.b8[13]; ctxt->m.b8[15] = tctxt.m.b8[12]; ctxt->m.b8[16] = tctxt.m.b8[19]; ctxt->m.b8[17] = tctxt.m.b8[18]; ctxt->m.b8[18] = tctxt.m.b8[17]; ctxt->m.b8[19] = tctxt.m.b8[16]; ctxt->m.b8[20] = tctxt.m.b8[23]; ctxt->m.b8[21] = tctxt.m.b8[22]; ctxt->m.b8[22] = tctxt.m.b8[21]; ctxt->m.b8[23] = tctxt.m.b8[20]; ctxt->m.b8[24] = tctxt.m.b8[27]; ctxt->m.b8[25] = tctxt.m.b8[26]; ctxt->m.b8[26] = tctxt.m.b8[25]; ctxt->m.b8[27] = tctxt.m.b8[24]; ctxt->m.b8[28] = tctxt.m.b8[31]; ctxt->m.b8[29] = tctxt.m.b8[30]; ctxt->m.b8[30] = tctxt.m.b8[29]; ctxt->m.b8[31] = tctxt.m.b8[28]; ctxt->m.b8[32] = tctxt.m.b8[35]; ctxt->m.b8[33] = tctxt.m.b8[34]; ctxt->m.b8[34] = tctxt.m.b8[33]; ctxt->m.b8[35] = tctxt.m.b8[32]; ctxt->m.b8[36] = tctxt.m.b8[39]; ctxt->m.b8[37] = tctxt.m.b8[38]; ctxt->m.b8[38] = tctxt.m.b8[37]; ctxt->m.b8[39] = tctxt.m.b8[36]; ctxt->m.b8[40] = tctxt.m.b8[43]; ctxt->m.b8[41] = tctxt.m.b8[42]; ctxt->m.b8[42] = tctxt.m.b8[41]; ctxt->m.b8[43] = tctxt.m.b8[40]; ctxt->m.b8[44] = tctxt.m.b8[47]; ctxt->m.b8[45] = tctxt.m.b8[46]; ctxt->m.b8[46] = 
tctxt.m.b8[45]; ctxt->m.b8[47] = tctxt.m.b8[44]; ctxt->m.b8[48] = tctxt.m.b8[51]; ctxt->m.b8[49] = tctxt.m.b8[50]; ctxt->m.b8[50] = tctxt.m.b8[49]; ctxt->m.b8[51] = tctxt.m.b8[48]; ctxt->m.b8[52] = tctxt.m.b8[55]; ctxt->m.b8[53] = tctxt.m.b8[54]; ctxt->m.b8[54] = tctxt.m.b8[53]; ctxt->m.b8[55] = tctxt.m.b8[52]; ctxt->m.b8[56] = tctxt.m.b8[59]; ctxt->m.b8[57] = tctxt.m.b8[58]; ctxt->m.b8[58] = tctxt.m.b8[57]; ctxt->m.b8[59] = tctxt.m.b8[56]; ctxt->m.b8[60] = tctxt.m.b8[63]; ctxt->m.b8[61] = tctxt.m.b8[62]; ctxt->m.b8[62] = tctxt.m.b8[61]; ctxt->m.b8[63] = tctxt.m.b8[60]; #endif a = H(0); b = H(1); c = H(2); d = H(3); e = H(4); for (t = 0; t < 20; t++) { s = t & 0x0f; if (t >= 16) { W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s)); } tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t); e = d; d = c; c = S(30, b); b = a; a = tmp; } for (t = 20; t < 40; t++) { s = t & 0x0f; W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s)); tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t); e = d; d = c; c = S(30, b); b = a; a = tmp; } for (t = 40; t < 60; t++) { s = t & 0x0f; W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s)); tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t); e = d; d = c; c = S(30, b); b = a; a = tmp; } for (t = 60; t < 80; t++) { s = t & 0x0f; W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s)); tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t); e = d; d = c; c = S(30, b); b = a; a = tmp; } H(0) = H(0) + a; H(1) = H(1) + b; H(2) = H(2) + c; H(3) = H(3) + d; H(4) = H(4) + e; bzero(&ctxt->m.b8[0], 64); } /*------------------------------------------------------------*/ void sha1_init(ctxt) struct sha1_ctxt *ctxt; { bzero(ctxt, sizeof(struct sha1_ctxt)); H(0) = 0x67452301; H(1) = 0xefcdab89; H(2) = 0x98badcfe; H(3) = 0x10325476; H(4) = 0xc3d2e1f0; } void sha1_pad(ctxt) struct sha1_ctxt *ctxt; { size_t padlen; /*pad length in bytes*/ size_t padstart; PUTPAD(0x80); padstart = COUNT % 64; padlen = 64 - padstart; if (padlen < 8) { bzero(&ctxt->m.b8[padstart], padlen); COUNT += padlen; COUNT %= 64; sha1_step(ctxt); padstart = COUNT % 64; /* should be 0 */ padlen = 64 - padstart; /* should be 64 */ } bzero(&ctxt->m.b8[padstart], padlen - 8); COUNT += (padlen - 8); COUNT %= 64; #if BYTE_ORDER == BIG_ENDIAN PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]); #else PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]); #endif } void sha1_loop(ctxt, input, len) struct sha1_ctxt *ctxt; - const u_int8_t *input; + const uint8_t *input; size_t len; { size_t gaplen; size_t gapstart; size_t off; size_t copysiz; off = 0; while (off < len) { gapstart = COUNT % 64; gaplen = 64 - gapstart; copysiz = (gaplen < len - off) ? 
gaplen : len - off; bcopy(&input[off], &ctxt->m.b8[gapstart], copysiz); COUNT += copysiz; COUNT %= 64; ctxt->c.b64[0] += copysiz * 8; if (COUNT % 64 == 0) sha1_step(ctxt); off += copysiz; } } void sha1_result(struct sha1_ctxt *ctxt, char digest0[static SHA1_RESULTLEN]) { - u_int8_t *digest; + uint8_t *digest; - digest = (u_int8_t *)digest0; + digest = (uint8_t *)digest0; sha1_pad(ctxt); #if BYTE_ORDER == BIG_ENDIAN bcopy(&ctxt->h.b8[0], digest, SHA1_RESULTLEN); #else digest[0] = ctxt->h.b8[3]; digest[1] = ctxt->h.b8[2]; digest[2] = ctxt->h.b8[1]; digest[3] = ctxt->h.b8[0]; digest[4] = ctxt->h.b8[7]; digest[5] = ctxt->h.b8[6]; digest[6] = ctxt->h.b8[5]; digest[7] = ctxt->h.b8[4]; digest[8] = ctxt->h.b8[11]; digest[9] = ctxt->h.b8[10]; digest[10] = ctxt->h.b8[9]; digest[11] = ctxt->h.b8[8]; digest[12] = ctxt->h.b8[15]; digest[13] = ctxt->h.b8[14]; digest[14] = ctxt->h.b8[13]; digest[15] = ctxt->h.b8[12]; digest[16] = ctxt->h.b8[19]; digest[17] = ctxt->h.b8[18]; digest[18] = ctxt->h.b8[17]; digest[19] = ctxt->h.b8[16]; #endif } #endif /*unsupported*/ Index: head/sys/crypto/sha1.h =================================================================== --- head/sys/crypto/sha1.h (revision 367308) +++ head/sys/crypto/sha1.h (revision 367309) @@ -1,74 +1,74 @@ /* $FreeBSD$ */ /* $KAME: sha1.h,v 1.5 2000/03/27 04:36:23 sumikawa Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */
/*
 * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
 * based on: http://csrc.nist.gov/fips/fip180-1.txt
 * implemented by Jun-ichiro itojun Itoh
 */

#ifndef _CRYPTO_SHA1_H_
#define _CRYPTO_SHA1_H_

struct sha1_ctxt {
	union {
-		u_int8_t	b8[20];
-		u_int32_t	b32[5];
+		uint8_t		b8[20];
+		uint32_t	b32[5];
	} h;
	union {
-		u_int8_t	b8[8];
-		u_int64_t	b64[1];
+		uint8_t		b8[8];
+		uint64_t	b64[1];
	} c;
	union {
-		u_int8_t	b8[64];
-		u_int32_t	b32[16];
+		uint8_t		b8[64];
+		uint32_t	b32[16];
	} m;
-	u_int8_t	count;
+	uint8_t		count;
};
typedef struct sha1_ctxt SHA1_CTX;

#define	SHA1_RESULTLEN	(160/8)

#ifdef _KERNEL
extern void sha1_init(struct sha1_ctxt *);
extern void sha1_pad(struct sha1_ctxt *);
-extern void sha1_loop(struct sha1_ctxt *, const u_int8_t *, size_t);
+extern void sha1_loop(struct sha1_ctxt *, const uint8_t *, size_t);
extern void sha1_result(struct sha1_ctxt *, char[__min_size(SHA1_RESULTLEN)]);

/* compatibility with other SHA1 source codes */
#define SHA1Init(x)		sha1_init((x))
#define SHA1Update(x, y, z)	sha1_loop((x), (y), (z))
#define SHA1Final(x, y)		sha1_result((y), (x))
#endif /* _KERNEL */

#endif /*_CRYPTO_SHA1_H_*/

Index: head/sys/crypto/skein/skein_port.h
===================================================================
--- head/sys/crypto/skein/skein_port.h	(revision 367308)
+++ head/sys/crypto/skein/skein_port.h	(revision 367309)
@@ -1,168 +1,168 @@
/* $FreeBSD$ */
#ifndef _SKEIN_PORT_H_
#define _SKEIN_PORT_H_
/*******************************************************************
**
** Platform-specific definitions for Skein hash function.
**
** Source code author: Doug Whiting, 2008.
**
** This algorithm and source code is released to the public domain.
**
** Many thanks to Brian Gladman for his portable header files.
**
** To port Skein to an "unsupported" platform, change the definitions
** in this file appropriately.
**
********************************************************************/

#include
#include

#ifndef _OPENSOLARIS_SYS_TYPES_H_	/* Avoid redefining this typedef */
typedef unsigned int	uint_t;		/* native unsigned integer */
#endif

-typedef u_int8_t	u08b_t;		/* 8-bit unsigned integer */
-typedef u_int32_t	uint_32t;	/* 32-bit unsigned integer */
-typedef u_int64_t	u64b_t;		/* 64-bit unsigned integer */
+typedef uint8_t		u08b_t;		/* 8-bit unsigned integer */
+typedef uint32_t	uint_32t;	/* 32-bit unsigned integer */
+typedef uint64_t	u64b_t;		/* 64-bit unsigned integer */

#ifndef RotL_64
#define RotL_64(x,N)	(((x) << (N)) | ((x) >> (64-(N))))
#endif

__BEGIN_DECLS

/*
 * Skein is "natively" little-endian (unlike SHA-xxx), for optimal
 * performance on x86 CPUs.  The Skein code requires the following
 * definitions for dealing with endianness:
 *
 *    SKEIN_NEED_SWAP:  0 for little-endian, 1 for big-endian
 *    Skein_Put64_LSB_First
 *    Skein_Get64_LSB_First
 *    Skein_Swap64
 *
 * If SKEIN_NEED_SWAP is defined at compile time, it is used here
 * along with the portable versions of Put64/Get64/Swap64, which
 * are slow in general.
 *
 * Otherwise, an "auto-detect" of endianness is attempted below.
 * If the default handling doesn't work well, the user may insert
 * platform-specific code instead (e.g., for big-endian CPUs).
 *
 */
#ifndef SKEIN_NEED_SWAP		/* compile-time "override" for endianness?
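 *
 * (Illustrative restatement, not part of this change: on a little-endian
 * build the definitions below reduce to no-ops, i.e. Skein_Swap64(x) is
 * just (x) and Skein_Put64_LSB_First()/Skein_Get64_LSB_First() are plain
 * memcpy() calls, while a big-endian build gets bswap64() plus the
 * portable le64enc()/le64dec() loops further down.)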
*/ #if BYTE_ORDER == BIG_ENDIAN /* here for big-endian CPUs */ #define SKEIN_NEED_SWAP (1) #ifdef SKEIN_PORT_CODE void Skein_Put64_LSB_First(u08b_t *dst,const u64b_t *src,size_t bCnt); void Skein_Get64_LSB_First(u64b_t *dst,const u08b_t *src,size_t wCnt); #endif /* ifdef SKEIN_PORT_CODE */ #elif BYTE_ORDER == LITTLE_ENDIAN /* here for x86 and x86-64 CPUs (and other detected little-endian CPUs) */ #define SKEIN_NEED_SWAP (0) #define Skein_Put64_LSB_First(dst08,src64,bCnt) memcpy(dst08,src64,bCnt) #define Skein_Get64_LSB_First(dst64,src08,wCnt) memcpy(dst64,src08,8*(wCnt)) #else #error "Skein needs endianness setting!" #endif #endif /* ifndef SKEIN_NEED_SWAP */ /* ****************************************************************** * Provide any definitions still needed. ****************************************************************** */ #ifndef Skein_Swap64 /* swap for big-endian, nop for little-endian */ #if SKEIN_NEED_SWAP #define Skein_Swap64(w64) bswap64(w64) #else #define Skein_Swap64(w64) (w64) #endif #endif /* ifndef Skein_Swap64 */ #ifndef Skein_Put64_LSB_First void Skein_Put64_LSB_First(u08b_t *dst,const u64b_t *src,size_t bCnt) #ifdef SKEIN_PORT_CODE /* instantiate the function code here? */ { size_t n; for (n = 0; n < bCnt / 8; n++) le64enc(dst + n * 8, src[n]); } #else ; /* output only the function prototype */ #endif #endif /* ifndef Skein_Put64_LSB_First */ #ifndef Skein_Get64_LSB_First void Skein_Get64_LSB_First(u64b_t *dst,const u08b_t *src,size_t wCnt) #ifdef SKEIN_PORT_CODE /* instantiate the function code here? */ { size_t n; for (n = 0; n < wCnt; n++) dst[n] = le64dec(src + n * 8); } #else ; /* output only the function prototype */ #endif #endif /* ifndef Skein_Get64_LSB_First */ /* Start FreeBSD libmd shims */ /* Ensure libmd symbols do not clash with libcrypto */ #ifndef SKEIN256_Init #define SKEIN256_Init _libmd_SKEIN256_Init #define SKEIN512_Init _libmd_SKEIN512_Init #define SKEIN1024_Init _libmd_SKEIN1024_Init #endif #ifndef SKEIN256_Update #define SKEIN256_Update _libmd_SKEIN256_Update #define SKEIN512_Update _libmd_SKEIN512_Update #define SKEIN1024_Update _libmd_SKEIN1024_Update #endif #ifndef SKEIN256_Final #define SKEIN256_Final _libmd_SKEIN256_Final #define SKEIN512_Final _libmd_SKEIN512_Final #define SKEIN1024_Final _libmd_SKEIN1024_Final #endif #ifndef SKEIN256_End #define SKEIN256_End _libmd_SKEIN256_End #define SKEIN512_End _libmd_SKEIN512_End #define SKEIN1024_End _libmd_SKEIN1024_End #endif #ifndef SKEIN256_Fd #define SKEIN256_Fd _libmd_SKEIN256_Fd #define SKEIN512_Fd _libmd_SKEIN512_Fd #define SKEIN1024_Fd _libmd_SKEIN1024_Fd #endif #ifndef SKEIN256_FdChunk #define SKEIN256_FdChunk _libmd_SKEIN256_FdChunk #define SKEIN512_FdChunk _libmd_SKEIN512_FdChunk #define SKEIN1024_FdChunk _libmd_SKEIN1024_FdChunk #endif #ifndef SKEIN256_File #define SKEIN256_File _libmd_SKEIN256_File #define SKEIN512_File _libmd_SKEIN512_File #define SKEIN1024_File _libmd_SKEIN1024_File #endif #ifndef SKEIN256_FileChunk #define SKEIN256_FileChunk _libmd_SKEIN256_FileChunk #define SKEIN512_FileChunk _libmd_SKEIN512_FileChunk #define SKEIN1024_FileChunk _libmd_SKEIN1024_FileChunk #endif #ifndef SKEIN256_Data #define SKEIN256_Data _libmd_SKEIN256_Data #define SKEIN512_Data _libmd_SKEIN512_Data #define SKEIN1024_Data _libmd_SKEIN1024_Data #endif __END_DECLS #endif /* ifndef _SKEIN_PORT_H_ */ Index: head/sys/opencrypto/crypto.c =================================================================== --- head/sys/opencrypto/crypto.c (revision 367308) +++ head/sys/opencrypto/crypto.c 
(revision 367309) @@ -1,2286 +1,2286 @@ /*- * Copyright (c) 2002-2006 Sam Leffler. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Cryptographic Subsystem. * * This code is derived from the Openbsd Cryptographic Framework (OCF) * that has the copyright shown below. Very little of the original * code remains. */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include "opt_compat.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) #include #endif SDT_PROVIDER_DEFINE(opencrypto); /* * Crypto drivers register themselves by allocating a slot in the * crypto_drivers table with crypto_get_driverid() and then registering * each asym algorithm they support with crypto_kregister(). */ static struct mtx crypto_drivers_mtx; /* lock on driver table */ #define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_ASSERT() mtx_assert(&crypto_drivers_mtx, MA_OWNED) /* * Crypto device/driver capabilities structure. * * Synchronization: * (d) - protected by CRYPTO_DRIVER_LOCK() * (q) - protected by CRYPTO_Q_LOCK() * Not tagged fields are read-only. 
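 *
 * For example, cc_sessions below is tagged (d) and so may only be read
 * or updated with CRYPTO_DRIVER_LOCK() held, while cc_qblocked, tagged
 * (q), requires CRYPTO_Q_LOCK(); untagged fields such as cc_dev are
 * treated as read-only.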
 */
struct cryptocap {
	device_t	cc_dev;
	uint32_t	cc_hid;
-	u_int32_t	cc_sessions;		/* (d) # of sessions */
-	u_int32_t	cc_koperations;		/* (d) # of asym operations */
-	u_int8_t	cc_kalg[CRK_ALGORITHM_MAX + 1];
+	uint32_t	cc_sessions;		/* (d) # of sessions */
+	uint32_t	cc_koperations;		/* (d) # of asym operations */
+	uint8_t		cc_kalg[CRK_ALGORITHM_MAX + 1];

	int		cc_flags;		/* (d) flags */
#define CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
	int		cc_qblocked;		/* (q) symmetric q blocked */
	int		cc_kqblocked;		/* (q) asymmetric q blocked */
	size_t		cc_session_size;
	volatile int	cc_refs;
};

static	struct cryptocap **crypto_drivers = NULL;
static	int crypto_drivers_size = 0;

struct crypto_session {
	struct cryptocap *cap;
	void *softc;
	struct crypto_session_params csp;
};

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * A single mutex is used to lock access to both queues.  We could
 * have one per-queue but having one simplifies handling of block/unblock
 * operations.
 */
static	int crp_sleep = 0;
static	TAILQ_HEAD(cryptop_q ,cryptop) crp_q;	/* request queues */
static	TAILQ_HEAD(,cryptkop) crp_kq;
static	struct mtx crypto_q_mtx;
#define	CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
#define	CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)

SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
    "In-kernel cryptography");

/*
 * Taskqueue used to dispatch the crypto requests
 * that have the CRYPTO_F_ASYNC flag
 */
static struct taskqueue *crypto_tq;

/*
 * Crypto seq numbers are operated on with modular arithmetic
 */
#define	CRYPTO_SEQ_GT(a,b)	((int)((a)-(b)) > 0)

struct crypto_ret_worker {
	struct mtx crypto_ret_mtx;

	TAILQ_HEAD(,cryptop) crp_ordered_ret_q;	/* ordered callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptkop) crp_ret_kq;	/* callback queue for asym jobs */

-	u_int32_t reorder_ops;		/* total ordered sym jobs received */
-	u_int32_t reorder_cur_seq;	/* current sym job dispatched */
+	uint32_t reorder_ops;		/* total ordered sym jobs received */
+	uint32_t reorder_cur_seq;	/* current sym job dispatched */

	struct proc *cryptoretproc;
};
static struct crypto_ret_worker *crypto_ret_workers = NULL;

#define CRYPTO_RETW(i)		(&crypto_ret_workers[i])
#define CRYPTO_RETW_ID(w)	((w) - crypto_ret_workers)
#define FOREACH_CRYPTO_RETW(w) \
	for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)

#define	CRYPTO_RETW_LOCK(w)	mtx_lock(&w->crypto_ret_mtx)
#define	CRYPTO_RETW_UNLOCK(w)	mtx_unlock(&w->crypto_ret_mtx)
#define	CRYPTO_RETW_EMPTY(w) \
	(TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q))

static int crypto_workers_num = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#endif

static	uma_zone_t cryptop_zone;
static	uma_zone_t cryptoses_zone;

int	crypto_userasymcrypto = 1;
SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW,
    &crypto_userasymcrypto, 0,
    "Enable user-mode access to asymmetric crypto support");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
    &crypto_userasymcrypto, 0,
    "Enable/disable user-mode access to asymmetric crypto support");
#endif

int	crypto_devallowsoft = 0;
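/*
 * Illustrative note on the CRYPTO_SEQ_GT() macro above (not part of this
 * change): the cast to int makes the comparison wraparound-safe.  With
 * a = 0 and b = 0xffffffff, (a - b) is 1 modulo 2^32, so
 * CRYPTO_SEQ_GT(0, 0xffffffff) is true and a freshly wrapped sequence
 * number still sorts after its predecessor.
 */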
SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW, &crypto_devallowsoft, 0, "Enable use of software crypto by /dev/crypto"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW, &crypto_devallowsoft, 0, "Enable/disable use of software crypto by /dev/crypto"); #endif MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records"); static void crypto_proc(void); static struct proc *cryptoproc; static void crypto_ret_proc(struct crypto_ret_worker *ret_worker); static void crypto_destroy(void); static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint); static int crypto_kinvoke(struct cryptkop *krp); static void crypto_task_invoke(void *ctx, int pending); static void crypto_batch_enqueue(struct cryptop *crp); static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)]; SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW, cryptostats, nitems(cryptostats), "Crypto system statistics"); #define CRYPTOSTAT_INC(stat) do { \ counter_u64_add( \ cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\ 1); \ } while (0) static void cryptostats_init(void *arg __unused) { COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK); } SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL); static void cryptostats_fini(void *arg __unused) { COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats)); } SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini, NULL); /* Try to avoid directly exposing the key buffer as a symbol */ static struct keybuf *keybuf; static struct keybuf empty_keybuf = { .kb_nents = 0 }; /* Obtain the key buffer from boot metadata */ static void keybuf_init(void) { caddr_t kmdp; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); keybuf = (struct keybuf *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_KEYBUF); if (keybuf == NULL) keybuf = &empty_keybuf; } /* It'd be nice if we could store these in some kind of secure memory... 
*/ struct keybuf * get_keybuf(void) { return (keybuf); } static struct cryptocap * cap_ref(struct cryptocap *cap) { refcount_acquire(&cap->cc_refs); return (cap); } static void cap_rele(struct cryptocap *cap) { if (refcount_release(&cap->cc_refs) == 0) return; KASSERT(cap->cc_sessions == 0, ("freeing crypto driver with active sessions")); KASSERT(cap->cc_koperations == 0, ("freeing crypto driver with active key operations")); free(cap, M_CRYPTO_DATA); } static int crypto_init(void) { struct crypto_ret_worker *ret_worker; int error; mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table", MTX_DEF|MTX_QUIET); TAILQ_INIT(&crp_q); TAILQ_INIT(&crp_kq); mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF); cryptop_zone = uma_zcreate("cryptop", sizeof(struct cryptop), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); cryptoses_zone = uma_zcreate("crypto_session", sizeof(struct crypto_session), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); crypto_drivers_size = CRYPTO_DRIVERS_INITIAL; crypto_drivers = malloc(crypto_drivers_size * sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus) crypto_workers_num = mp_ncpus; crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &crypto_tq); taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN, "crypto"); error = kproc_create((void (*)(void *)) crypto_proc, NULL, &cryptoproc, 0, 0, "crypto"); if (error) { printf("crypto_init: cannot start crypto thread; error %d", error); goto bad; } crypto_ret_workers = mallocarray(crypto_workers_num, sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO); FOREACH_CRYPTO_RETW(ret_worker) { TAILQ_INIT(&ret_worker->crp_ordered_ret_q); TAILQ_INIT(&ret_worker->crp_ret_q); TAILQ_INIT(&ret_worker->crp_ret_kq); ret_worker->reorder_ops = 0; ret_worker->reorder_cur_seq = 0; mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF); error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker, &ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker)); if (error) { printf("crypto_init: cannot start cryptoret thread; error %d", error); goto bad; } } keybuf_init(); return 0; bad: crypto_destroy(); return error; } /* * Signal a crypto thread to terminate. We use the driver * table lock to synchronize the sleep/wakeups so that we * are sure the threads have terminated before we release * the data structures they use. See crypto_finis below * for the other half of this song-and-dance. */ static void crypto_terminate(struct proc **pp, void *q) { struct proc *p; mtx_assert(&crypto_drivers_mtx, MA_OWNED); p = *pp; *pp = NULL; if (p) { wakeup_one(q); PROC_LOCK(p); /* NB: insure we don't miss wakeup */ CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */ msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0); PROC_UNLOCK(p); CRYPTO_DRIVER_LOCK(); } } static void hmac_init_pad(const struct auth_hash *axf, const char *key, int klen, void *auth_ctx, uint8_t padval) { uint8_t hmac_key[HMAC_MAX_BLOCK_LEN]; u_int i; KASSERT(axf->blocksize <= sizeof(hmac_key), ("Invalid HMAC block size %d", axf->blocksize)); /* * If the key is larger than the block size, use the digest of * the key as the key instead. 
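 *
 * Worked example (standard RFC 2104 behavior, restated for clarity):
 * for HMAC-SHA-256 the block size is 64 bytes, so a 100-byte key is
 * first hashed down to hashsize (32) bytes, zero-padded back out to the
 * 64-byte block, and only then XORed bytewise with padval (HMAC_IPAD_VAL
 * or HMAC_OPAD_VAL) and absorbed into auth_ctx.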
*/ memset(hmac_key, 0, sizeof(hmac_key)); if (klen > axf->blocksize) { axf->Init(auth_ctx); axf->Update(auth_ctx, key, klen); axf->Final(hmac_key, auth_ctx); klen = axf->hashsize; } else memcpy(hmac_key, key, klen); for (i = 0; i < axf->blocksize; i++) hmac_key[i] ^= padval; axf->Init(auth_ctx); axf->Update(auth_ctx, hmac_key, axf->blocksize); explicit_bzero(hmac_key, sizeof(hmac_key)); } void hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen, void *auth_ctx) { hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL); } void hmac_init_opad(const struct auth_hash *axf, const char *key, int klen, void *auth_ctx) { hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL); } static void crypto_destroy(void) { struct crypto_ret_worker *ret_worker; int i; /* * Terminate any crypto threads. */ if (crypto_tq != NULL) taskqueue_drain_all(crypto_tq); CRYPTO_DRIVER_LOCK(); crypto_terminate(&cryptoproc, &crp_q); FOREACH_CRYPTO_RETW(ret_worker) crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q); CRYPTO_DRIVER_UNLOCK(); /* XXX flush queues??? */ /* * Reclaim dynamically allocated resources. */ for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] != NULL) cap_rele(crypto_drivers[i]); } free(crypto_drivers, M_CRYPTO_DATA); if (cryptoses_zone != NULL) uma_zdestroy(cryptoses_zone); if (cryptop_zone != NULL) uma_zdestroy(cryptop_zone); mtx_destroy(&crypto_q_mtx); FOREACH_CRYPTO_RETW(ret_worker) mtx_destroy(&ret_worker->crypto_ret_mtx); free(crypto_ret_workers, M_CRYPTO_DATA); if (crypto_tq != NULL) taskqueue_free(crypto_tq); mtx_destroy(&crypto_drivers_mtx); } uint32_t crypto_ses2hid(crypto_session_t crypto_session) { return (crypto_session->cap->cc_hid); } uint32_t crypto_ses2caps(crypto_session_t crypto_session) { return (crypto_session->cap->cc_flags & 0xff000000); } void * crypto_get_driver_session(crypto_session_t crypto_session) { return (crypto_session->softc); } const struct crypto_session_params * crypto_get_params(crypto_session_t crypto_session) { return (&crypto_session->csp); } struct auth_hash * crypto_auth_hash(const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: return (&auth_hash_hmac_sha1); case CRYPTO_SHA2_224_HMAC: return (&auth_hash_hmac_sha2_224); case CRYPTO_SHA2_256_HMAC: return (&auth_hash_hmac_sha2_256); case CRYPTO_SHA2_384_HMAC: return (&auth_hash_hmac_sha2_384); case CRYPTO_SHA2_512_HMAC: return (&auth_hash_hmac_sha2_512); case CRYPTO_NULL_HMAC: return (&auth_hash_null); case CRYPTO_RIPEMD160_HMAC: return (&auth_hash_hmac_ripemd_160); case CRYPTO_SHA1: return (&auth_hash_sha1); case CRYPTO_SHA2_224: return (&auth_hash_sha2_224); case CRYPTO_SHA2_256: return (&auth_hash_sha2_256); case CRYPTO_SHA2_384: return (&auth_hash_sha2_384); case CRYPTO_SHA2_512: return (&auth_hash_sha2_512); case CRYPTO_AES_NIST_GMAC: switch (csp->csp_auth_klen) { case 128 / 8: return (&auth_hash_nist_gmac_aes_128); case 192 / 8: return (&auth_hash_nist_gmac_aes_192); case 256 / 8: return (&auth_hash_nist_gmac_aes_256); default: return (NULL); } case CRYPTO_BLAKE2B: return (&auth_hash_blake2b); case CRYPTO_BLAKE2S: return (&auth_hash_blake2s); case CRYPTO_POLY1305: return (&auth_hash_poly1305); case CRYPTO_AES_CCM_CBC_MAC: switch (csp->csp_auth_klen) { case 128 / 8: return (&auth_hash_ccm_cbc_mac_128); case 192 / 8: return (&auth_hash_ccm_cbc_mac_192); case 256 / 8: return (&auth_hash_ccm_cbc_mac_256); default: return (NULL); } default: return (NULL); } } struct enc_xform * crypto_cipher(const struct 
crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_RIJNDAEL128_CBC: return (&enc_xform_rijndael128); case CRYPTO_AES_XTS: return (&enc_xform_aes_xts); case CRYPTO_AES_ICM: return (&enc_xform_aes_icm); case CRYPTO_AES_NIST_GCM_16: return (&enc_xform_aes_nist_gcm); case CRYPTO_CAMELLIA_CBC: return (&enc_xform_camellia); case CRYPTO_NULL_CBC: return (&enc_xform_null); case CRYPTO_CHACHA20: return (&enc_xform_chacha20); case CRYPTO_AES_CCM_16: return (&enc_xform_ccm); default: return (NULL); } } static struct cryptocap * -crypto_checkdriver(u_int32_t hid) +crypto_checkdriver(uint32_t hid) { return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]); } /* * Select a driver for a new session that supports the specified * algorithms and, optionally, is constrained according to the flags. */ static struct cryptocap * crypto_select_driver(const struct crypto_session_params *csp, int flags) { struct cryptocap *cap, *best; int best_match, error, hid; CRYPTO_DRIVER_ASSERT(); best = NULL; for (hid = 0; hid < crypto_drivers_size; hid++) { /* * If there is no driver for this slot, or the driver * is not appropriate (hardware or software based on * match), then skip. */ cap = crypto_drivers[hid]; if (cap == NULL || (cap->cc_flags & flags) == 0) continue; error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp); if (error >= 0) continue; /* * Use the driver with the highest probe value. * Hardware drivers use a higher probe value than * software. In case of a tie, prefer the driver with * the fewest active sessions. */ if (best == NULL || error > best_match || (error == best_match && cap->cc_sessions < best->cc_sessions)) { best = cap; best_match = error; } } return best; } static enum alg_type { ALG_NONE = 0, ALG_CIPHER, ALG_DIGEST, ALG_KEYED_DIGEST, ALG_COMPRESSION, ALG_AEAD } alg_types[] = { [CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_CBC] = ALG_CIPHER, [CRYPTO_SHA1] = ALG_DIGEST, [CRYPTO_NULL_HMAC] = ALG_DIGEST, [CRYPTO_NULL_CBC] = ALG_CIPHER, [CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION, [CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_CAMELLIA_CBC] = ALG_CIPHER, [CRYPTO_AES_XTS] = ALG_CIPHER, [CRYPTO_AES_ICM] = ALG_CIPHER, [CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_NIST_GCM_16] = ALG_AEAD, [CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST, [CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST, [CRYPTO_CHACHA20] = ALG_CIPHER, [CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_RIPEMD160] = ALG_DIGEST, [CRYPTO_SHA2_224] = ALG_DIGEST, [CRYPTO_SHA2_256] = ALG_DIGEST, [CRYPTO_SHA2_384] = ALG_DIGEST, [CRYPTO_SHA2_512] = ALG_DIGEST, [CRYPTO_POLY1305] = ALG_KEYED_DIGEST, [CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_CCM_16] = ALG_AEAD, }; static enum alg_type alg_type(int alg) { if (alg < nitems(alg_types)) return (alg_types[alg]); return (ALG_NONE); } static bool alg_is_compression(int alg) { return (alg_type(alg) == ALG_COMPRESSION); } static bool alg_is_cipher(int alg) { return (alg_type(alg) == ALG_CIPHER); } static bool alg_is_digest(int alg) { return (alg_type(alg) == ALG_DIGEST || alg_type(alg) == ALG_KEYED_DIGEST); } static bool alg_is_keyed_digest(int alg) { return (alg_type(alg) == ALG_KEYED_DIGEST); } static bool alg_is_aead(int alg) { return (alg_type(alg) == ALG_AEAD); } #define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN) /* Various sanity checks on crypto session parameters. 
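 *
 * As an illustration, a cipher-only session that passes these checks
 * could be described as follows (a sketch; key and IV lengths are in
 * bytes and assume AES-128):
 *
 *	struct crypto_session_params csp;
 *
 *	memset(&csp, 0, sizeof(csp));
 *	csp.csp_mode = CSP_MODE_CIPHER;
 *	csp.csp_cipher_alg = CRYPTO_AES_CBC;
 *	csp.csp_cipher_klen = 16;
 *	csp.csp_ivlen = 16;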
*/ static bool check_csp(const struct crypto_session_params *csp) { struct auth_hash *axf; /* Mode-independent checks. */ if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0) return (false); if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 || csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0) return (false); if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0) return (false); if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0) return (false); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: if (!alg_is_compression(csp->csp_cipher_alg)) return (false); if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 || csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || csp->csp_auth_mlen != 0) return (false); break; case CSP_MODE_CIPHER: if (!alg_is_cipher(csp->csp_cipher_alg)) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0) return (false); } if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || csp->csp_auth_mlen != 0) return (false); break; case CSP_MODE_DIGEST: if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); /* IV is optional for digests (e.g. GMAC). */ if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (!alg_is_digest(csp->csp_auth_alg)) return (false); /* Key is optional for BLAKE2 digests. */ if (csp->csp_auth_alg == CRYPTO_BLAKE2B || csp->csp_auth_alg == CRYPTO_BLAKE2S) ; else if (alg_is_keyed_digest(csp->csp_auth_alg)) { if (csp->csp_auth_klen == 0) return (false); } else { if (csp->csp_auth_klen != 0) return (false); } if (csp->csp_auth_mlen != 0) { axf = crypto_auth_hash(csp); if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) return (false); } break; case CSP_MODE_AEAD: if (!alg_is_aead(csp->csp_cipher_alg)) return (false); if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0 || csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0) return (false); /* * XXX: Would be nice to have a better way to get this * value. */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: if (csp->csp_auth_mlen > 16) return (false); break; } break; case CSP_MODE_ETA: if (!alg_is_cipher(csp->csp_cipher_alg)) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0) return (false); } if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (!alg_is_digest(csp->csp_auth_alg)) return (false); /* Key is optional for BLAKE2 digests. */ if (csp->csp_auth_alg == CRYPTO_BLAKE2B || csp->csp_auth_alg == CRYPTO_BLAKE2S) ; else if (alg_is_keyed_digest(csp->csp_auth_alg)) { if (csp->csp_auth_klen == 0) return (false); } else { if (csp->csp_auth_klen != 0) return (false); } if (csp->csp_auth_mlen != 0) { axf = crypto_auth_hash(csp); if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) return (false); } break; default: return (false); } return (true); } /* * Delete a session after it has been detached from its driver. 
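 *
 * Consumers do not call this directly; they release sessions with
 * crypto_freesession() below, which first gives the driver a chance
 * to tear down its per-session state.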
*/ static void crypto_deletesession(crypto_session_t cses) { struct cryptocap *cap; cap = cses->cap; zfree(cses->softc, M_CRYPTO_DATA); uma_zfree(cryptoses_zone, cses); CRYPTO_DRIVER_LOCK(); cap->cc_sessions--; if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) wakeup(cap); CRYPTO_DRIVER_UNLOCK(); cap_rele(cap); } /* * Create a new session. The crid argument specifies a crypto * driver to use or constraints on a driver to select (hardware * only, software only, either). Whatever driver is selected * must be capable of the requested crypto algorithms. */ int crypto_newsession(crypto_session_t *cses, const struct crypto_session_params *csp, int crid) { crypto_session_t res; struct cryptocap *cap; int err; if (!check_csp(csp)) return (EINVAL); res = NULL; CRYPTO_DRIVER_LOCK(); if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { /* * Use specified driver; verify it is capable. */ cap = crypto_checkdriver(crid); if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0) cap = NULL; } else { /* * No requested driver; select based on crid flags. */ cap = crypto_select_driver(csp, crid); } if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); CRYPTDEB("no driver"); return (EOPNOTSUPP); } cap_ref(cap); cap->cc_sessions++; CRYPTO_DRIVER_UNLOCK(); res = uma_zalloc(cryptoses_zone, M_WAITOK | M_ZERO); res->cap = cap; res->softc = malloc(cap->cc_session_size, M_CRYPTO_DATA, M_WAITOK | M_ZERO); res->csp = *csp; /* Call the driver initialization routine. */ err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp); if (err != 0) { CRYPTDEB("dev newsession failed: %d", err); crypto_deletesession(res); return (err); } *cses = res; return (0); } /* * Delete an existing session (or a reserved session on an unregistered * driver). */ void crypto_freesession(crypto_session_t cses) { struct cryptocap *cap; if (cses == NULL) return; cap = cses->cap; /* Call the driver cleanup routine, if available. */ CRYPTODEV_FREESESSION(cap->cc_dev, cses); crypto_deletesession(cses); } /* * Return a new driver id. Registers a driver with the system so that * it can be probed by subsequent sessions. */ int32_t crypto_get_driverid(device_t dev, size_t sessionsize, int flags) { struct cryptocap *cap, **newdrv; int i; if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { device_printf(dev, "no flags specified when registering driver\n"); return -1; } cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); cap->cc_dev = dev; cap->cc_session_size = sessionsize; cap->cc_flags = flags; refcount_init(&cap->cc_refs, 1); CRYPTO_DRIVER_LOCK(); for (;;) { for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] == NULL) break; } if (i < crypto_drivers_size) break; /* Out of entries, allocate some more. */ if (2 * crypto_drivers_size <= crypto_drivers_size) { CRYPTO_DRIVER_UNLOCK(); printf("crypto: driver count wraparound!\n"); cap_rele(cap); return (-1); } CRYPTO_DRIVER_UNLOCK(); newdrv = malloc(2 * crypto_drivers_size * sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO); CRYPTO_DRIVER_LOCK(); memcpy(newdrv, crypto_drivers, crypto_drivers_size * sizeof(*crypto_drivers)); crypto_drivers_size *= 2; free(crypto_drivers, M_CRYPTO_DATA); crypto_drivers = newdrv; } cap->cc_hid = i; crypto_drivers[i] = cap; CRYPTO_DRIVER_UNLOCK(); if (bootverbose) printf("crypto: assign %s driver id %u, flags 0x%x\n", device_get_nameunit(dev), i, flags); return i; } /* * Lookup a driver by name. We match against the full device * name and unit, and against just the name. 
The latter gives * us a simple wildcarding by device name. On success return the * driver/hardware identifier; otherwise return -1. */ int crypto_find_driver(const char *match) { struct cryptocap *cap; int i, len = strlen(match); CRYPTO_DRIVER_LOCK(); for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] == NULL) continue; cap = crypto_drivers[i]; if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 || strncmp(match, device_get_name(cap->cc_dev), len) == 0) { CRYPTO_DRIVER_UNLOCK(); return (i); } } CRYPTO_DRIVER_UNLOCK(); return (-1); } /* * Return the device_t for the specified driver or NULL * if the driver identifier is invalid. */ device_t crypto_find_device_byhid(int hid) { struct cryptocap *cap; device_t dev; dev = NULL; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(hid); if (cap != NULL) dev = cap->cc_dev; CRYPTO_DRIVER_UNLOCK(); return (dev); } /* * Return the device/driver capabilities. */ int crypto_getcaps(int hid) { struct cryptocap *cap; int flags; flags = 0; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(hid); if (cap != NULL) flags = cap->cc_flags; CRYPTO_DRIVER_UNLOCK(); return (flags); } /* * Register support for a key-related algorithm. This routine * is called once for each algorithm supported by a driver. */ int -crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags) +crypto_kregister(uint32_t driverid, int kalg, uint32_t flags) { struct cryptocap *cap; int err; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL && (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) { /* * XXX Do some performance testing to determine placing. * XXX We probably need an auxiliary data structure that * XXX describes relative performances. */ cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; if (bootverbose) printf("crypto: %s registers key alg %u flags %u\n" , device_get_nameunit(cap->cc_dev) , kalg , flags ); gone_in_dev(cap->cc_dev, 14, "asymmetric crypto"); err = 0; } else err = EINVAL; CRYPTO_DRIVER_UNLOCK(); return err; } /* * Unregister all algorithms associated with a crypto driver. * If there are pending sessions using it, leave enough information * around so that subsequent calls using those sessions will * correctly detect the driver has been unregistered and reroute * requests. */ int -crypto_unregister_all(u_int32_t driverid) +crypto_unregister_all(uint32_t driverid) { struct cryptocap *cap; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); return (EINVAL); } cap->cc_flags |= CRYPTOCAP_F_CLEANUP; crypto_drivers[driverid] = NULL; /* * XXX: This doesn't do anything to kick sessions that * have no pending operations. */ while (cap->cc_sessions != 0 || cap->cc_koperations != 0) mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0); CRYPTO_DRIVER_UNLOCK(); cap_rele(cap); return (0); } /* * Clear blockage on a driver. The what parameter indicates whether * the driver is now ready for cryptop's and/or cryptokop's.
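 *
 * A driver that returned ERESTART from its process method typically
 * calls this once resources free up, e.g. from its completion
 * interrupt handler (a sketch; sc->sc_cid is a hypothetical softc
 * field holding the id returned by crypto_get_driverid()):
 *
 *	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);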
*/ int -crypto_unblock(u_int32_t driverid, int what) +crypto_unblock(uint32_t driverid, int what) { struct cryptocap *cap; int err; CRYPTO_Q_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL) { if (what & CRYPTO_SYMQ) cap->cc_qblocked = 0; if (what & CRYPTO_ASYMQ) cap->cc_kqblocked = 0; if (crp_sleep) wakeup_one(&crp_q); err = 0; } else err = EINVAL; CRYPTO_Q_UNLOCK(); return err; } size_t crypto_buffer_len(struct crypto_buffer *cb) { switch (cb->cb_type) { case CRYPTO_BUF_CONTIG: return (cb->cb_buf_len); case CRYPTO_BUF_MBUF: if (cb->cb_mbuf->m_flags & M_PKTHDR) return (cb->cb_mbuf->m_pkthdr.len); return (m_length(cb->cb_mbuf, NULL)); case CRYPTO_BUF_VMPAGE: return (cb->cb_vm_page_len); case CRYPTO_BUF_UIO: return (cb->cb_uio->uio_resid); default: return (0); } } #ifdef INVARIANTS /* Various sanity checks on crypto requests. */ static void cb_sanity(struct crypto_buffer *cb, const char *name) { KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST, ("incoming crp with invalid %s buffer type", name)); switch (cb->cb_type) { case CRYPTO_BUF_CONTIG: KASSERT(cb->cb_buf_len >= 0, ("incoming crp with -ve %s buffer length", name)); break; case CRYPTO_BUF_VMPAGE: KASSERT(CRYPTO_HAS_VMPAGE, ("incoming crp uses dmap on unsupported arch")); KASSERT(cb->cb_vm_page_len >= 0, ("incoming crp with -ve %s buffer length", name)); KASSERT(cb->cb_vm_page_offset >= 0, ("incoming crp with -ve %s buffer offset", name)); KASSERT(cb->cb_vm_page_offset < PAGE_SIZE, ("incoming crp with %s buffer offset greater than page size" , name)); break; default: break; } } static void crp_sanity(struct cryptop *crp) { struct crypto_session_params *csp; struct crypto_buffer *out; size_t ilen, len, olen; KASSERT(crp->crp_session != NULL, ("incoming crp without a session")); KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE && crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST, ("incoming crp with invalid output buffer type")); KASSERT(crp->crp_etype == 0, ("incoming crp with error")); KASSERT(!(crp->crp_flags & CRYPTO_F_DONE), ("incoming crp already done")); csp = &crp->crp_session->csp; cb_sanity(&crp->crp_buf, "input"); ilen = crypto_buffer_len(&crp->crp_buf); olen = ilen; out = NULL; if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) { if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) { cb_sanity(&crp->crp_obuf, "output"); out = &crp->crp_obuf; olen = crypto_buffer_len(out); } } else KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE, ("incoming crp with separate output buffer " "but no session support")); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS || crp->crp_op == CRYPTO_OP_DECOMPRESS, ("invalid compression op %x", crp->crp_op)); break; case CSP_MODE_CIPHER: KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT || crp->crp_op == CRYPTO_OP_DECRYPT, ("invalid cipher op %x", crp->crp_op)); break; case CSP_MODE_DIGEST: KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST || crp->crp_op == CRYPTO_OP_VERIFY_DIGEST, ("invalid digest op %x", crp->crp_op)); break; case CSP_MODE_AEAD: KASSERT(crp->crp_op == (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || crp->crp_op == (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), ("invalid AEAD op %x", crp->crp_op)); if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, ("GCM without a separate IV")); if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16) KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, ("CCM without a separate IV")); break; case CSP_MODE_ETA: KASSERT(crp->crp_op == (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
crp->crp_op == (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), ("invalid ETA op %x", crp->crp_op)); break; } if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { if (crp->crp_aad == NULL) { KASSERT(crp->crp_aad_start == 0 || crp->crp_aad_start < ilen, ("invalid AAD start")); KASSERT(crp->crp_aad_length != 0 || crp->crp_aad_start == 0, ("AAD with zero length and non-zero start")); KASSERT(crp->crp_aad_length == 0 || crp->crp_aad_start + crp->crp_aad_length <= ilen, ("AAD outside input length")); } else { KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD, ("session doesn't support separate AAD buffer")); KASSERT(crp->crp_aad_start == 0, ("separate AAD buffer with non-zero AAD start")); KASSERT(crp->crp_aad_length != 0, ("separate AAD buffer with zero length")); } } else { KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 && crp->crp_aad_length == 0, ("AAD region in request not supporting AAD")); } if (csp->csp_ivlen == 0) { KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0, ("IV_SEPARATE set when IV isn't used")); KASSERT(crp->crp_iv_start == 0, ("crp_iv_start set when IV isn't used")); } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) { KASSERT(crp->crp_iv_start == 0, ("IV_SEPARATE used with non-zero IV start")); } else { KASSERT(crp->crp_iv_start < ilen, ("invalid IV start")); KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen, ("IV outside buffer length")); } /* XXX: payload_start of 0 should always be < ilen? */ KASSERT(crp->crp_payload_start == 0 || crp->crp_payload_start < ilen, ("invalid payload start")); KASSERT(crp->crp_payload_start + crp->crp_payload_length <= ilen, ("payload outside input buffer")); if (out == NULL) { KASSERT(crp->crp_payload_output_start == 0, ("payload output start non-zero without output buffer")); } else { KASSERT(crp->crp_payload_output_start < olen, ("invalid payload output start")); KASSERT(crp->crp_payload_output_start + crp->crp_payload_length <= olen, ("payload outside output buffer")); } if (csp->csp_mode == CSP_MODE_DIGEST || csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) len = ilen; else len = olen; KASSERT(crp->crp_digest_start == 0 || crp->crp_digest_start < len, ("invalid digest start")); /* XXX: For the mlen == 0 case this check isn't perfect. */ KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len, ("digest outside buffer")); } else { KASSERT(crp->crp_digest_start == 0, ("non-zero digest start for request without a digest")); } if (csp->csp_cipher_klen != 0) KASSERT(csp->csp_cipher_key != NULL || crp->crp_cipher_key != NULL, ("cipher request without a key")); if (csp->csp_auth_klen != 0) KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL, ("auth request without a key")); KASSERT(crp->crp_callback != NULL, ("incoming crp without callback")); } #endif /* * Add a crypto request to a queue, to be processed by the kernel thread. 
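 *
 * A typical consumer allocates a request against an existing session,
 * describes the buffer and operation, sets a completion callback, and
 * dispatches (a sketch; cses, buf, buflen, and cb are caller-side
 * names, and error handling is omitted):
 *
 *	crp = crypto_getreq(cses, M_WAITOK);
 *	crypto_use_buf(crp, buf, buflen);
 *	crp->crp_op = CRYPTO_OP_ENCRYPT;
 *	crp->crp_payload_start = 0;
 *	crp->crp_payload_length = buflen;
 *	crp->crp_callback = cb;
 *	error = crypto_dispatch(crp);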
*/ int crypto_dispatch(struct cryptop *crp) { struct cryptocap *cap; int result; #ifdef INVARIANTS crp_sanity(crp); #endif CRYPTOSTAT_INC(cs_ops); crp->crp_retw_id = ((uintptr_t)crp->crp_session) % crypto_workers_num; if (CRYPTOP_ASYNC(crp)) { if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) { struct crypto_ret_worker *ret_worker; ret_worker = CRYPTO_RETW(crp->crp_retw_id); CRYPTO_RETW_LOCK(ret_worker); crp->crp_seq = ret_worker->reorder_ops++; CRYPTO_RETW_UNLOCK(ret_worker); } TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp); taskqueue_enqueue(crypto_tq, &crp->crp_task); return (0); } if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) { /* * Caller marked the request to be processed * immediately; dispatch it directly to the * driver unless the driver is currently blocked. */ cap = crp->crp_session->cap; if (!cap->cc_qblocked) { result = crypto_invoke(cap, crp, 0); if (result != ERESTART) return (result); /* * The driver ran out of resources, put the request on * the queue. */ } } crypto_batch_enqueue(crp); return 0; } void crypto_batch_enqueue(struct cryptop *crp) { CRYPTO_Q_LOCK(); TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); if (crp_sleep) wakeup_one(&crp_q); CRYPTO_Q_UNLOCK(); } /* * Add an asymmetric crypto request to a queue, * to be processed by the kernel thread. */ int crypto_kdispatch(struct cryptkop *krp) { int error; CRYPTOSTAT_INC(cs_kops); krp->krp_cap = NULL; error = crypto_kinvoke(krp); if (error == ERESTART) { CRYPTO_Q_LOCK(); TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); if (crp_sleep) wakeup_one(&crp_q); CRYPTO_Q_UNLOCK(); error = 0; } return error; } /* * Verify a driver is suitable for the specified operation. */ static __inline int kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp) { return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0; } /* * Select a driver for an asym operation. The driver must * support the necessary algorithm. The caller can constrain * which device is selected with the flags parameter. The * algorithm we use here is pretty stupid; just use the first * driver that supports the algorithms we need. If there are * multiple suitable drivers we choose the driver with the * fewest active operations. We prefer hardware-backed * drivers to software ones when either may be used. */ static struct cryptocap * crypto_select_kdriver(const struct cryptkop *krp, int flags) { struct cryptocap *cap, *best; int match, hid; CRYPTO_DRIVER_ASSERT(); /* * Look first for hardware crypto devices if permitted. */ if (flags & CRYPTOCAP_F_HARDWARE) match = CRYPTOCAP_F_HARDWARE; else match = CRYPTOCAP_F_SOFTWARE; best = NULL; again: for (hid = 0; hid < crypto_drivers_size; hid++) { /* * If there is no driver for this slot, or the driver * is not appropriate (hardware or software based on * match), then skip. */ cap = crypto_drivers[hid]; if (cap == NULL || (cap->cc_flags & match) == 0) continue; /* verify all the algorithms are supported. */ if (kdriver_suitable(cap, krp)) { if (best == NULL || cap->cc_koperations < best->cc_koperations) best = cap; } } if (best != NULL) return best; if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { /* sort of an Algol 68-style for loop */ match = CRYPTOCAP_F_SOFTWARE; goto again; } return best; } /* * Choose a driver for an asymmetric crypto request. */ static struct cryptocap * crypto_lookup_kdriver(struct cryptkop *krp) { struct cryptocap *cap; uint32_t crid; /* If this request is requeued, it might already have a driver.
*/ cap = krp->krp_cap; if (cap != NULL) return (cap); /* Use krp_crid to choose a driver. */ crid = krp->krp_crid; if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { cap = crypto_checkdriver(crid); if (cap != NULL) { /* * Driver present, it must support the * necessary algorithm and, if s/w drivers are * excluded, it must be registered as * hardware-backed. */ if (!kdriver_suitable(cap, krp) || (!crypto_devallowsoft && (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0)) cap = NULL; } } else { /* * No requested driver; select based on crid flags. */ if (!crypto_devallowsoft) /* NB: disallow s/w drivers */ crid &= ~CRYPTOCAP_F_SOFTWARE; cap = crypto_select_kdriver(krp, crid); } if (cap != NULL) { krp->krp_cap = cap_ref(cap); krp->krp_hid = cap->cc_hid; } return (cap); } /* * Dispatch an asymmetric crypto request. */ static int crypto_kinvoke(struct cryptkop *krp) { struct cryptocap *cap = NULL; int error; KASSERT(krp != NULL, ("%s: krp == NULL", __func__)); KASSERT(krp->krp_callback != NULL, ("%s: krp->krp_callback == NULL", __func__)); CRYPTO_DRIVER_LOCK(); cap = crypto_lookup_kdriver(krp); if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); krp->krp_status = ENODEV; crypto_kdone(krp); return (0); } /* * If the device is blocked, return ERESTART to requeue it. */ if (cap->cc_kqblocked) { /* * XXX: Previously this set krp_status to ERESTART and * invoked crypto_kdone but the caller would still * requeue it. */ CRYPTO_DRIVER_UNLOCK(); return (ERESTART); } cap->cc_koperations++; CRYPTO_DRIVER_UNLOCK(); error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0); if (error == ERESTART) { CRYPTO_DRIVER_LOCK(); cap->cc_koperations--; CRYPTO_DRIVER_UNLOCK(); return (error); } KASSERT(error == 0, ("error %d returned from crypto_kprocess", error)); return (0); } static void crypto_task_invoke(void *ctx, int pending) { struct cryptocap *cap; struct cryptop *crp; int result; crp = (struct cryptop *)ctx; cap = crp->crp_session->cap; result = crypto_invoke(cap, crp, 0); if (result == ERESTART) crypto_batch_enqueue(crp); } /* * Dispatch a crypto request to the appropriate crypto devices. */ static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint) { KASSERT(crp != NULL, ("%s: crp == NULL", __func__)); KASSERT(crp->crp_callback != NULL, ("%s: crp->crp_callback == NULL", __func__)); KASSERT(crp->crp_session != NULL, ("%s: crp->crp_session == NULL", __func__)); if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { struct crypto_session_params csp; crypto_session_t nses; /* * Driver has unregistered; migrate the session and return * an error to the caller so they'll resubmit the op. * * XXX: What if there are more already queued requests for this * session? * * XXX: Real solution is to make sessions refcounted * and force callers to hold a reference when * assigning to crp_session. Could maybe change * crypto_getreq to accept a session pointer to make * that work. Alternatively, we could abandon the * notion of rewriting crp_session in requests forcing * the caller to deal with allocating a new session. * Perhaps provide a method to allow a crp's session to * be swapped that callers could use. */ csp = crp->crp_session->csp; crypto_freesession(crp->crp_session); /* * XXX: Key pointers may no longer be valid. If we * really want to support this we need to define the * KPI such that 'csp' is required to be valid for the * duration of a session by the caller perhaps. * * XXX: If the keys have been changed this will reuse * the old keys.
This probably suggests making * rekeying more explicit and updating the key * pointers in 'csp' when the keys change. */ if (crypto_newsession(&nses, &csp, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0) crp->crp_session = nses; crp->crp_etype = EAGAIN; crypto_done(crp); return 0; } else { /* * Invoke the driver to process the request. */ return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint); } } void crypto_destroyreq(struct cryptop *crp) { #ifdef DIAGNOSTIC { struct cryptop *crp2; struct crypto_ret_worker *ret_worker; CRYPTO_Q_LOCK(); TAILQ_FOREACH(crp2, &crp_q, crp_next) { KASSERT(crp2 != crp, ("Freeing cryptop from the crypto queue (%p).", crp)); } CRYPTO_Q_UNLOCK(); FOREACH_CRYPTO_RETW(ret_worker) { CRYPTO_RETW_LOCK(ret_worker); TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) { KASSERT(crp2 != crp, ("Freeing cryptop from the return queue (%p).", crp)); } CRYPTO_RETW_UNLOCK(ret_worker); } } #endif } void crypto_freereq(struct cryptop *crp) { if (crp == NULL) return; crypto_destroyreq(crp); uma_zfree(cryptop_zone, crp); } static void _crypto_initreq(struct cryptop *crp, crypto_session_t cses) { crp->crp_session = cses; } void crypto_initreq(struct cryptop *crp, crypto_session_t cses) { memset(crp, 0, sizeof(*crp)); _crypto_initreq(crp, cses); } struct cryptop * crypto_getreq(crypto_session_t cses, int how) { struct cryptop *crp; MPASS(how == M_WAITOK || how == M_NOWAIT); crp = uma_zalloc(cryptop_zone, how | M_ZERO); if (crp != NULL) _crypto_initreq(crp, cses); return (crp); } /* * Invoke the callback on behalf of the driver. */ void crypto_done(struct cryptop *crp) { KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0, ("crypto_done: op already done, flags 0x%x", crp->crp_flags)); crp->crp_flags |= CRYPTO_F_DONE; if (crp->crp_etype != 0) CRYPTOSTAT_INC(cs_errs); /* * CBIMM means unconditionally do the callback immediately; * CBIFSYNC means do the callback immediately only if the * operation was done synchronously. Both are used to avoid * doing extraneous context switches; the latter is mostly * used with the software crypto driver. */ if (!CRYPTOP_ASYNC_KEEPORDER(crp) && ((crp->crp_flags & CRYPTO_F_CBIMM) || ((crp->crp_flags & CRYPTO_F_CBIFSYNC) && (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) { /* * Do the callback directly. This is ok when the * callback routine does very little (e.g. the * /dev/crypto callback method just does a wakeup). */ crp->crp_callback(crp); } else { struct crypto_ret_worker *ret_worker; bool wake; ret_worker = CRYPTO_RETW(crp->crp_retw_id); wake = false; /* * Normal case; queue the callback for the thread. */ CRYPTO_RETW_LOCK(ret_worker); if (CRYPTOP_ASYNC_KEEPORDER(crp)) { struct cryptop *tmp; TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q, cryptop_q, crp_next) { if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) { TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q, tmp, crp, crp_next); break; } } if (tmp == NULL) { TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q, crp, crp_next); } if (crp->crp_seq == ret_worker->reorder_cur_seq) wake = true; } else { if (CRYPTO_RETW_EMPTY(ret_worker)) wake = true; TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next); } if (wake) wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ CRYPTO_RETW_UNLOCK(ret_worker); } } /* * Invoke the callback on behalf of the driver. 
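 *
 * Drivers hand back a completed asymmetric request by setting
 * krp_status and calling this function (a sketch; error is the
 * driver-side result):
 *
 *	krp->krp_status = error;
 *	crypto_kdone(krp);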
*/ void crypto_kdone(struct cryptkop *krp) { struct crypto_ret_worker *ret_worker; struct cryptocap *cap; if (krp->krp_status != 0) CRYPTOSTAT_INC(cs_kerrs); cap = krp->krp_cap; if (cap != NULL) { CRYPTO_DRIVER_LOCK(); KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0")); cap->cc_koperations--; if (cap->cc_koperations == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) wakeup(cap); CRYPTO_DRIVER_UNLOCK(); krp->krp_cap = NULL; cap_rele(cap); } ret_worker = CRYPTO_RETW(0); CRYPTO_RETW_LOCK(ret_worker); if (CRYPTO_RETW_EMPTY(ret_worker)) wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next); CRYPTO_RETW_UNLOCK(ret_worker); } int crypto_getfeat(int *featp) { int hid, kalg, feat = 0; CRYPTO_DRIVER_LOCK(); for (hid = 0; hid < crypto_drivers_size; hid++) { const struct cryptocap *cap = crypto_drivers[hid]; if (cap == NULL || ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && !crypto_devallowsoft)) { continue; } for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED) feat |= 1 << kalg; } CRYPTO_DRIVER_UNLOCK(); *featp = feat; return (0); } /* * Terminate a thread at module unload. The process that * initiated this is waiting for us to signal that we're gone; * wake it up and exit. We use the driver table lock to ensure * we don't do the wakeup before they're waiting. There is no * race here because the waiter sleeps on the proc lock for the * thread so it gets notified at the right time because of an * extra wakeup that's done in exit1(). */ static void crypto_finis(void *chan) { CRYPTO_DRIVER_LOCK(); wakeup_one(chan); CRYPTO_DRIVER_UNLOCK(); kproc_exit(0); } /* * Crypto thread, dispatches crypto requests. */ static void crypto_proc(void) { struct cryptop *crp, *submit; struct cryptkop *krp; struct cryptocap *cap; int result, hint; #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) fpu_kern_thread(FPU_KERN_NORMAL); #endif CRYPTO_Q_LOCK(); for (;;) { /* * Find the first element in the queue that can be * processed and look-ahead to see if multiple ops * are ready for the same driver. */ submit = NULL; hint = 0; TAILQ_FOREACH(crp, &crp_q, crp_next) { cap = crp->crp_session->cap; /* * Driver cannot disappear while there is an active * session. */ KASSERT(cap != NULL, ("%s:%u Driver disappeared.", __func__, __LINE__)); if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { /* Op needs to be migrated, process it. */ if (submit == NULL) submit = crp; break; } if (!cap->cc_qblocked) { if (submit != NULL) { /* * We stop on finding another op, * regardless of whether it's for the same * driver or not. We could keep * searching the queue but it might be * better to just use a per-driver * queue instead. */ if (submit->crp_session->cap == cap) hint = CRYPTO_HINT_MORE; break; } else { submit = crp; if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) break; /* keep scanning for more that are q'd */ } } } if (submit != NULL) { TAILQ_REMOVE(&crp_q, submit, crp_next); cap = submit->crp_session->cap; KASSERT(cap != NULL, ("%s:%u Driver disappeared.", __func__, __LINE__)); CRYPTO_Q_UNLOCK(); result = crypto_invoke(cap, submit, hint); CRYPTO_Q_LOCK(); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptop's and put * the request back in the queue. It would be * best to put the request back where we got * it but that's hard so for now we put it * at the front. This should be ok; putting * it at the end does not work.
*/ cap->cc_qblocked = 1; TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); CRYPTOSTAT_INC(cs_blocks); } } /* As above, but for key ops */ TAILQ_FOREACH(krp, &crp_kq, krp_next) { cap = krp->krp_cap; if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { /* * Operation needs to be migrated, * clear krp_cap so a new driver is * selected. */ krp->krp_cap = NULL; cap_rele(cap); break; } if (!cap->cc_kqblocked) break; } if (krp != NULL) { TAILQ_REMOVE(&crp_kq, krp, krp_next); CRYPTO_Q_UNLOCK(); result = crypto_kinvoke(krp); CRYPTO_Q_LOCK(); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptkop's and put * the request back in the queue. It would be * best to put the request back where we got * it but that's hard so for now we put it * at the front. This should be ok; putting * it at the end does not work. */ krp->krp_cap->cc_kqblocked = 1; TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); CRYPTOSTAT_INC(cs_kblocks); } } if (submit == NULL && krp == NULL) { /* * Nothing more to be processed. Sleep until we're * woken because there are more ops to process. * This happens either by submission or by a driver * becoming unblocked and notifying us through * crypto_unblock. Note that when we wake up we * start processing each queue again from the * front. It's not clear that it's important to * preserve this ordering since ops may finish * out of order if dispatched to different devices * and some become blocked while others do not. */ crp_sleep = 1; msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); crp_sleep = 0; if (cryptoproc == NULL) break; CRYPTOSTAT_INC(cs_intrs); } } CRYPTO_Q_UNLOCK(); crypto_finis(&crp_q); } /* * Crypto returns thread, does callbacks for processed crypto requests. * Callbacks are done here, rather than in the crypto drivers, because * callbacks typically are expensive and would slow interrupt handling. */ static void crypto_ret_proc(struct crypto_ret_worker *ret_worker) { struct cryptop *crpt; struct cryptkop *krpt; CRYPTO_RETW_LOCK(ret_worker); for (;;) { /* Harvest return q's for completed ops */ crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q); if (crpt != NULL) { if (crpt->crp_seq == ret_worker->reorder_cur_seq) { TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next); ret_worker->reorder_cur_seq++; } else { crpt = NULL; } } if (crpt == NULL) { crpt = TAILQ_FIRST(&ret_worker->crp_ret_q); if (crpt != NULL) TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next); } krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq); if (krpt != NULL) TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next); if (crpt != NULL || krpt != NULL) { CRYPTO_RETW_UNLOCK(ret_worker); /* * Run callbacks unlocked. */ if (crpt != NULL) crpt->crp_callback(crpt); if (krpt != NULL) krpt->krp_callback(krpt); CRYPTO_RETW_LOCK(ret_worker); } else { /* * Nothing more to be processed. Sleep until we're * woken because there are more returns to process.
*/ msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT, "crypto_ret_wait", 0); if (ret_worker->cryptoretproc == NULL) break; CRYPTOSTAT_INC(cs_rets); } } CRYPTO_RETW_UNLOCK(ret_worker); crypto_finis(&ret_worker->crp_ret_q); } #ifdef DDB static void db_show_drivers(void) { int hid; db_printf("%12s %4s %4s %8s %2s %2s\n" , "Device" , "Ses" , "Kops" , "Flags" , "QB" , "KB" ); for (hid = 0; hid < crypto_drivers_size; hid++) { const struct cryptocap *cap = crypto_drivers[hid]; if (cap == NULL) continue; db_printf("%-12s %4u %4u %08x %2u %2u\n" , device_get_nameunit(cap->cc_dev) , cap->cc_sessions , cap->cc_koperations , cap->cc_flags , cap->cc_qblocked , cap->cc_kqblocked ); } } DB_SHOW_COMMAND(crypto, db_show_crypto) { struct cryptop *crp; struct crypto_ret_worker *ret_worker; db_show_drivers(); db_printf("\n"); db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", "Device", "Callback"); TAILQ_FOREACH(crp, &crp_q, crp_next) { db_printf("%4u %08x %4u %4u %04x %8p %8p\n" , crp->crp_session->cap->cc_hid , (int) crypto_ses2caps(crp->crp_session) , crp->crp_olen , crp->crp_etype , crp->crp_flags , device_get_nameunit(crp->crp_session->cap->cc_dev) , crp->crp_callback ); } FOREACH_CRYPTO_RETW(ret_worker) { db_printf("\n%8s %4s %4s %4s %8s\n", "ret_worker", "HID", "Etype", "Flags", "Callback"); if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { db_printf("%8td %4u %4u %04x %8p\n" , CRYPTO_RETW_ID(ret_worker) , crp->crp_session->cap->cc_hid , crp->crp_etype , crp->crp_flags , crp->crp_callback ); } } } } DB_SHOW_COMMAND(kcrypto, db_show_kcrypto) { struct cryptkop *krp; struct crypto_ret_worker *ret_worker; db_show_drivers(); db_printf("\n"); db_printf("%4s %5s %4s %4s %8s %4s %8s\n", "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); TAILQ_FOREACH(krp, &crp_kq, krp_next) { db_printf("%4u %5u %4u %4u %08x %4u %8p\n" , krp->krp_op , krp->krp_status , krp->krp_iparams, krp->krp_oparams , krp->krp_crid, krp->krp_hid , krp->krp_callback ); } ret_worker = CRYPTO_RETW(0); if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { db_printf("%4s %5s %8s %4s %8s\n", "Op", "Status", "CRID", "HID", "Callback"); TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) { db_printf("%4u %5u %08x %4u %8p\n" , krp->krp_op , krp->krp_status , krp->krp_crid, krp->krp_hid , krp->krp_callback ); } } } #endif int crypto_modevent(module_t mod, int type, void *unused); /* * Initialization code, both for static and dynamic loading. * Note this is not invoked with the usual MODULE_DECLARE * mechanism but instead is listed as a dependency by the * cryptosoft driver. This guarantees proper ordering of * calls on module load/unload. 
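 *
 * The consumer side of that contract is the dependency declared by
 * cryptosoft (a sketch of the existing declaration):
 *
 *	MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);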
*/ int crypto_modevent(module_t mod, int type, void *unused) { int error = EINVAL; switch (type) { case MOD_LOAD: error = crypto_init(); if (error == 0 && bootverbose) printf("crypto: <crypto core>\n"); break; case MOD_UNLOAD: /*XXX disallow if active sessions */ error = 0; crypto_destroy(); return 0; } return error; } MODULE_VERSION(crypto, 1); MODULE_DEPEND(crypto, zlib, 1, 1, 1); Index: head/sys/opencrypto/cryptodeflate.c =================================================================== --- head/sys/opencrypto/cryptodeflate.c (revision 367308) +++ head/sys/opencrypto/cryptodeflate.c (revision 367309) @@ -1,244 +1,244 @@ /* $OpenBSD: deflate.c,v 1.3 2001/08/20 02:45:22 hugh Exp $ */ /*- * Copyright (c) 2001 Jean-Jacques Bernard-Gundol (jj@wabbitt.org) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ /* * This file contains a wrapper around the deflate compression algorithm * functions using the zlib library (see sys/contrib/zlib) */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DECLARE(opencrypto); SDT_PROBE_DEFINE2(opencrypto, deflate, deflate_global, entry, - "int", "u_int32_t"); + "int", "uint32_t"); SDT_PROBE_DEFINE6(opencrypto, deflate, deflate_global, bad, "int", "int", "int", "int", "int", "int"); SDT_PROBE_DEFINE6(opencrypto, deflate, deflate_global, iter, "int", "int", "int", "int", "int", "int"); SDT_PROBE_DEFINE2(opencrypto, deflate, deflate_global, return, - "int", "u_int32_t"); + "int", "uint32_t"); int window_inflate = -1 * MAX_WBITS; int window_deflate = -12; static void * crypto_zalloc(void *nil, u_int type, u_int size) { void *ptr; ptr = malloc(type *size, M_CRYPTO_DATA, M_NOWAIT); return ptr; } static void crypto_zfree(void *nil, void *ptr) { free(ptr, M_CRYPTO_DATA); } /* * This function takes a block of data and (de)compresses it using the deflate * algorithm */ -u_int32_t +uint32_t deflate_global(data, size, decomp, out) - u_int8_t *data; - u_int32_t size; + uint8_t *data; + uint32_t size; int decomp; - u_int8_t **out; + uint8_t **out; { /* decomp indicates whether we compress (0) or decompress (1) */ z_stream zbuf; - u_int8_t *output; - u_int32_t count, result; + uint8_t *output; + uint32_t count, result; int error, i; struct deflate_buf *bufh, *bufp; SDT_PROBE2(opencrypto, deflate, deflate_global, entry, decomp, size); bufh = bufp = NULL; if (!decomp) { i = 1; } else { /* * Choose a buffer with 4x the size of the input buffer * for the size of the output buffer in the case of * decompression. If it's not sufficient, it will need to be * updated while the decompression is going on. */ i = 4; } /* * Make sure we do have enough output space. Repeated calls to * deflate need at least 6 bytes of output buffer space to avoid * repeated markers. We will always provide at least 16 bytes. */ while ((size * i) < 16) i++; bufh = bufp = malloc(sizeof(*bufp) + (size_t)(size * i), M_CRYPTO_DATA, M_NOWAIT); if (bufp == NULL) { SDT_PROBE6(opencrypto, deflate, deflate_global, bad, decomp, 0, __LINE__, 0, 0, 0); goto bad2; } bufp->next = NULL; bufp->size = size * i; bzero(&zbuf, sizeof(z_stream)); zbuf.zalloc = crypto_zalloc; zbuf.zfree = crypto_zfree; zbuf.opaque = Z_NULL; zbuf.next_in = data; /* Data that is going to be processed. */ zbuf.avail_in = size; /* Total length of data to be processed. */ zbuf.next_out = bufp->data; zbuf.avail_out = bufp->size; error = decomp ? inflateInit2(&zbuf, window_inflate) : deflateInit2(&zbuf, Z_DEFAULT_COMPRESSION, Z_METHOD, window_deflate, Z_MEMLEVEL, Z_DEFAULT_STRATEGY); if (error != Z_OK) { SDT_PROBE6(opencrypto, deflate, deflate_global, bad, decomp, error, __LINE__, 0, 0, 0); goto bad; } for (;;) { error = decomp ? inflate(&zbuf, Z_SYNC_FLUSH) : deflate(&zbuf, Z_FINISH); if (error != Z_OK && error != Z_STREAM_END) { SDT_PROBE6(opencrypto, deflate, deflate_global, bad, decomp, error, __LINE__, zbuf.avail_in, zbuf.avail_out, zbuf.total_out); goto bad; } SDT_PROBE6(opencrypto, deflate, deflate_global, iter, decomp, error, __LINE__, zbuf.avail_in, zbuf.avail_out, zbuf.total_out); if (decomp && zbuf.avail_in == 0 && error == Z_STREAM_END) { /* Done. */ break; } else if (!decomp && error == Z_STREAM_END) { /* Done. */ break; } else if (zbuf.avail_out == 0) { struct deflate_buf *p; /* We need more output space for another iteration.
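 *
 * Each growth step below appends another (size * i)-byte chunk to the
 * singly-linked deflate_buf list; the chunks are concatenated into a
 * single output buffer once (de)compression completes.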
*/ p = malloc(sizeof(*p) + (size_t)(size * i), M_CRYPTO_DATA, M_NOWAIT); if (p == NULL) { SDT_PROBE6(opencrypto, deflate, deflate_global, bad, decomp, 0, __LINE__, 0, 0, 0); goto bad; } p->next = NULL; p->size = size * i; bufp->next = p; bufp = p; zbuf.next_out = bufp->data; zbuf.avail_out = bufp->size; } else { /* Unexpected result. */ SDT_PROBE6(opencrypto, deflate, deflate_global, bad, decomp, error, __LINE__, zbuf.avail_in, zbuf.avail_out, zbuf.total_out); goto bad; } } result = count = zbuf.total_out; *out = malloc(result, M_CRYPTO_DATA, M_NOWAIT); if (*out == NULL) { SDT_PROBE6(opencrypto, deflate, deflate_global, bad, decomp, 0, __LINE__, 0, 0, 0); goto bad; } if (decomp) inflateEnd(&zbuf); else deflateEnd(&zbuf); output = *out; for (bufp = bufh; bufp != NULL; ) { if (count > bufp->size) { struct deflate_buf *p; bcopy(bufp->data, *out, bufp->size); *out += bufp->size; count -= bufp->size; p = bufp; bufp = bufp->next; free(p, M_CRYPTO_DATA); } else { /* It should be the last buffer. */ bcopy(bufp->data, *out, count); *out += count; free(bufp, M_CRYPTO_DATA); bufp = NULL; count = 0; } } *out = output; SDT_PROBE2(opencrypto, deflate, deflate_global, return, decomp, result); return result; bad: if (decomp) inflateEnd(&zbuf); else deflateEnd(&zbuf); for (bufp = bufh; bufp != NULL; ) { struct deflate_buf *p; p = bufp; bufp = bufp->next; free(p, M_CRYPTO_DATA); } bad2: *out = NULL; return 0; } Index: head/sys/opencrypto/cryptodev.c =================================================================== --- head/sys/opencrypto/cryptodev.c (revision 367308) +++ head/sys/opencrypto/cryptodev.c (revision 367309) @@ -1,1708 +1,1708 @@ /* $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $ */ /*- * Copyright (c) 2001 Theo de Raadt * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DECLARE(opencrypto); SDT_PROBE_DEFINE1(opencrypto, dev, ioctl, error, "int"/*line number*/); #ifdef COMPAT_FREEBSD32 #include #include struct session_op32 { - u_int32_t cipher; - u_int32_t mac; - u_int32_t keylen; - u_int32_t key; + uint32_t cipher; + uint32_t mac; + uint32_t keylen; + uint32_t key; int mackeylen; - u_int32_t mackey; - u_int32_t ses; + uint32_t mackey; + uint32_t ses; }; struct session2_op32 { - u_int32_t cipher; - u_int32_t mac; - u_int32_t keylen; - u_int32_t key; + uint32_t cipher; + uint32_t mac; + uint32_t keylen; + uint32_t key; int mackeylen; - u_int32_t mackey; - u_int32_t ses; + uint32_t mackey; + uint32_t ses; int crid; int pad[4]; }; struct crypt_op32 { - u_int32_t ses; - u_int16_t op; - u_int16_t flags; + uint32_t ses; + uint16_t op; + uint16_t flags; u_int len; - u_int32_t src, dst; - u_int32_t mac; - u_int32_t iv; + uint32_t src, dst; + uint32_t mac; + uint32_t iv; }; struct crypt_aead32 { - u_int32_t ses; - u_int16_t op; - u_int16_t flags; + uint32_t ses; + uint16_t op; + uint16_t flags; u_int len; u_int aadlen; u_int ivlen; - u_int32_t src; - u_int32_t dst; - u_int32_t aad; - u_int32_t tag; - u_int32_t iv; + uint32_t src; + uint32_t dst; + uint32_t aad; + uint32_t tag; + uint32_t iv; }; struct crparam32 { - u_int32_t crp_p; + uint32_t crp_p; u_int crp_nbits; }; struct crypt_kop32 { u_int crk_op; u_int crk_status; u_short crk_iparams; u_short crk_oparams; u_int crk_crid; struct crparam32 crk_param[CRK_MAXPARAM]; }; #define CIOCGSESSION32 _IOWR('c', 101, struct session_op32) #define CIOCCRYPT32 _IOWR('c', 103, struct crypt_op32) #define CIOCKEY32 _IOWR('c', 104, struct crypt_kop32) #define CIOCGSESSION232 _IOWR('c', 106, struct session2_op32) #define CIOCKEY232 _IOWR('c', 107, struct crypt_kop32) #define CIOCCRYPTAEAD32 _IOWR('c', 109, struct crypt_aead32) static void session_op_from_32(const struct session_op32 *from, struct session2_op *to) { memset(to, 0, sizeof(*to)); CP(*from, *to, cipher); CP(*from, *to, mac); CP(*from, *to, keylen); PTRIN_CP(*from, *to, key); CP(*from, *to, mackeylen); PTRIN_CP(*from, *to, mackey); CP(*from, *to, ses); to->crid = CRYPTOCAP_F_HARDWARE; } static void session2_op_from_32(const struct session2_op32 *from, struct session2_op *to) { session_op_from_32((const struct session_op32 *)from, to); CP(*from, *to, crid); } static void session_op_to_32(const struct session2_op *from, struct session_op32 *to) { CP(*from, *to, cipher); CP(*from, *to, mac); CP(*from, *to, keylen); PTROUT_CP(*from, *to, key); CP(*from, *to, mackeylen); PTROUT_CP(*from, *to, mackey); CP(*from, *to, ses); } static void session2_op_to_32(const struct session2_op *from, struct session2_op32 *to) { session_op_to_32(from, (struct session_op32 *)to); CP(*from, *to, crid); } static void crypt_op_from_32(const struct crypt_op32 *from, struct crypt_op *to) { CP(*from, *to, ses); CP(*from, *to, op); CP(*from, *to, flags); CP(*from, *to, len); PTRIN_CP(*from, *to, src); PTRIN_CP(*from, *to, dst); PTRIN_CP(*from, *to, mac); PTRIN_CP(*from, *to, iv); } static void crypt_op_to_32(const struct crypt_op *from, struct crypt_op32 *to) { CP(*from, *to, 
ses); CP(*from, *to, op); CP(*from, *to, flags); CP(*from, *to, len); PTROUT_CP(*from, *to, src); PTROUT_CP(*from, *to, dst); PTROUT_CP(*from, *to, mac); PTROUT_CP(*from, *to, iv); } static void crypt_aead_from_32(const struct crypt_aead32 *from, struct crypt_aead *to) { CP(*from, *to, ses); CP(*from, *to, op); CP(*from, *to, flags); CP(*from, *to, len); CP(*from, *to, aadlen); CP(*from, *to, ivlen); PTRIN_CP(*from, *to, src); PTRIN_CP(*from, *to, dst); PTRIN_CP(*from, *to, aad); PTRIN_CP(*from, *to, tag); PTRIN_CP(*from, *to, iv); } static void crypt_aead_to_32(const struct crypt_aead *from, struct crypt_aead32 *to) { CP(*from, *to, ses); CP(*from, *to, op); CP(*from, *to, flags); CP(*from, *to, len); CP(*from, *to, aadlen); CP(*from, *to, ivlen); PTROUT_CP(*from, *to, src); PTROUT_CP(*from, *to, dst); PTROUT_CP(*from, *to, aad); PTROUT_CP(*from, *to, tag); PTROUT_CP(*from, *to, iv); } static void crparam_from_32(const struct crparam32 *from, struct crparam *to) { PTRIN_CP(*from, *to, crp_p); CP(*from, *to, crp_nbits); } static void crparam_to_32(const struct crparam *from, struct crparam32 *to) { PTROUT_CP(*from, *to, crp_p); CP(*from, *to, crp_nbits); } static void crypt_kop_from_32(const struct crypt_kop32 *from, struct crypt_kop *to) { int i; CP(*from, *to, crk_op); CP(*from, *to, crk_status); CP(*from, *to, crk_iparams); CP(*from, *to, crk_oparams); CP(*from, *to, crk_crid); for (i = 0; i < CRK_MAXPARAM; i++) crparam_from_32(&from->crk_param[i], &to->crk_param[i]); } static void crypt_kop_to_32(const struct crypt_kop *from, struct crypt_kop32 *to) { int i; CP(*from, *to, crk_op); CP(*from, *to, crk_status); CP(*from, *to, crk_iparams); CP(*from, *to, crk_oparams); CP(*from, *to, crk_crid); for (i = 0; i < CRK_MAXPARAM; i++) crparam_to_32(&from->crk_param[i], &to->crk_param[i]); } #endif static void session2_op_from_op(const struct session_op *from, struct session2_op *to) { memset(to, 0, sizeof(*to)); memcpy(to, from, sizeof(*from)); to->crid = CRYPTOCAP_F_HARDWARE; } static void session2_op_to_op(const struct session2_op *from, struct session_op *to) { memcpy(to, from, sizeof(*to)); } struct csession { TAILQ_ENTRY(csession) next; crypto_session_t cses; volatile u_int refs; - u_int32_t ses; + uint32_t ses; struct mtx lock; /* for op submission */ struct enc_xform *txform; int hashsize; int ivsize; int mode; void *key; void *mackey; }; struct cryptop_data { struct csession *cse; char *buf; char *obuf; char *aad; bool done; }; struct fcrypt { TAILQ_HEAD(csessionlist, csession) csessions; int sesn; struct mtx lock; }; static bool use_outputbuffers; SYSCTL_BOOL(_kern_crypto, OID_AUTO, cryptodev_use_output, CTLFLAG_RW, &use_outputbuffers, 0, "Use separate output buffers for /dev/crypto requests."); static bool use_separate_aad; SYSCTL_BOOL(_kern_crypto, OID_AUTO, cryptodev_separate_aad, CTLFLAG_RW, &use_separate_aad, 0, "Use separate AAD buffer for /dev/crypto requests."); static struct timeval warninterval = { .tv_sec = 60, .tv_usec = 0 }; SYSCTL_TIMEVAL_SEC(_kern, OID_AUTO, cryptodev_warn_interval, CTLFLAG_RW, &warninterval, "Delay in seconds between warnings of deprecated /dev/crypto algorithms"); static int cryptof_ioctl(struct file *, u_long, void *, struct ucred *, struct thread *); static int cryptof_stat(struct file *, struct stat *, struct ucred *, struct thread *); static int cryptof_close(struct file *, struct thread *); static int cryptof_fill_kinfo(struct file *, struct kinfo_file *, struct filedesc *); static struct fileops cryptofops = { .fo_read = invfo_rdwr, .fo_write = 
invfo_rdwr, .fo_truncate = invfo_truncate, .fo_ioctl = cryptof_ioctl, .fo_poll = invfo_poll, .fo_kqfilter = invfo_kqfilter, .fo_stat = cryptof_stat, .fo_close = cryptof_close, .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, .fo_sendfile = invfo_sendfile, .fo_fill_kinfo = cryptof_fill_kinfo, }; static struct csession *csefind(struct fcrypt *, u_int); static bool csedelete(struct fcrypt *, u_int); static struct csession *csecreate(struct fcrypt *, crypto_session_t, struct crypto_session_params *, struct enc_xform *, void *, struct auth_hash *, void *); static void csefree(struct csession *); static int cryptodev_op(struct csession *, struct crypt_op *, struct ucred *, struct thread *td); static int cryptodev_aead(struct csession *, struct crypt_aead *, struct ucred *, struct thread *); static int cryptodev_key(struct crypt_kop *); static int cryptodev_find(struct crypt_find_op *); /* * Check a crypto identifier to see if it requested * a software device/driver. This can be done either * by device name/class or through search constraints. */ static int checkforsoftware(int *cridp) { int crid; crid = *cridp; if (!crypto_devallowsoft) { if (crid & CRYPTOCAP_F_SOFTWARE) { if (crid & CRYPTOCAP_F_HARDWARE) { *cridp = CRYPTOCAP_F_HARDWARE; return 0; } return EINVAL; } if ((crid & CRYPTOCAP_F_HARDWARE) == 0 && (crypto_getcaps(crid) & CRYPTOCAP_F_HARDWARE) == 0) return EINVAL; } return 0; } /* ARGSUSED */ static int cryptof_ioctl( struct file *fp, u_long cmd, void *data, struct ucred *active_cred, struct thread *td) { static struct timeval keywarn, featwarn; struct crypto_session_params csp; struct fcrypt *fcr = fp->f_data; struct csession *cse; struct session2_op *sop; struct crypt_op *cop; struct crypt_aead *caead; struct enc_xform *txform = NULL; struct auth_hash *thash = NULL; void *key = NULL; void *mackey = NULL; struct crypt_kop *kop; crypto_session_t cses; - u_int32_t ses; + uint32_t ses; int error = 0, crid; union { struct session2_op sopc; #ifdef COMPAT_FREEBSD32 struct crypt_op copc; struct crypt_aead aeadc; struct crypt_kop kopc; #endif } thunk; #ifdef COMPAT_FREEBSD32 u_long cmd32; void *data32; cmd32 = 0; data32 = NULL; switch (cmd) { case CIOCGSESSION32: cmd32 = cmd; data32 = data; cmd = CIOCGSESSION; data = &thunk.sopc; session_op_from_32((struct session_op32 *)data32, &thunk.sopc); break; case CIOCGSESSION232: cmd32 = cmd; data32 = data; cmd = CIOCGSESSION2; data = &thunk.sopc; session2_op_from_32((struct session2_op32 *)data32, &thunk.sopc); break; case CIOCCRYPT32: cmd32 = cmd; data32 = data; cmd = CIOCCRYPT; data = &thunk.copc; crypt_op_from_32((struct crypt_op32 *)data32, &thunk.copc); break; case CIOCCRYPTAEAD32: cmd32 = cmd; data32 = data; cmd = CIOCCRYPTAEAD; data = &thunk.aeadc; crypt_aead_from_32((struct crypt_aead32 *)data32, &thunk.aeadc); break; case CIOCKEY32: case CIOCKEY232: cmd32 = cmd; data32 = data; if (cmd == CIOCKEY32) cmd = CIOCKEY; else cmd = CIOCKEY2; data = &thunk.kopc; crypt_kop_from_32((struct crypt_kop32 *)data32, &thunk.kopc); break; } #endif switch (cmd) { case CIOCGSESSION: case CIOCGSESSION2: if (cmd == CIOCGSESSION) { session2_op_from_op(data, &thunk.sopc); sop = &thunk.sopc; } else sop = (struct session2_op *)data; switch (sop->cipher) { case 0: break; case CRYPTO_AES_CBC: txform = &enc_xform_rijndael128; break; case CRYPTO_AES_XTS: txform = &enc_xform_aes_xts; break; case CRYPTO_NULL_CBC: txform = &enc_xform_null; break; case CRYPTO_CAMELLIA_CBC: txform = &enc_xform_camellia; break; case CRYPTO_AES_ICM: txform = &enc_xform_aes_icm; break; case 
CRYPTO_AES_NIST_GCM_16: txform = &enc_xform_aes_nist_gcm; break; case CRYPTO_CHACHA20: txform = &enc_xform_chacha20; break; case CRYPTO_AES_CCM_16: txform = &enc_xform_ccm; break; default: CRYPTDEB("invalid cipher"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } switch (sop->mac) { case 0: break; case CRYPTO_POLY1305: thash = &auth_hash_poly1305; break; case CRYPTO_SHA1_HMAC: thash = &auth_hash_hmac_sha1; break; case CRYPTO_SHA2_224_HMAC: thash = &auth_hash_hmac_sha2_224; break; case CRYPTO_SHA2_256_HMAC: thash = &auth_hash_hmac_sha2_256; break; case CRYPTO_SHA2_384_HMAC: thash = &auth_hash_hmac_sha2_384; break; case CRYPTO_SHA2_512_HMAC: thash = &auth_hash_hmac_sha2_512; break; case CRYPTO_RIPEMD160_HMAC: thash = &auth_hash_hmac_ripemd_160; break; #ifdef COMPAT_FREEBSD12 case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: /* Should always be paired with GCM. */ if (sop->cipher != CRYPTO_AES_NIST_GCM_16) { CRYPTDEB("GMAC without GCM"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; #endif case CRYPTO_AES_NIST_GMAC: switch (sop->mackeylen * 8) { case 128: thash = &auth_hash_nist_gmac_aes_128; break; case 192: thash = &auth_hash_nist_gmac_aes_192; break; case 256: thash = &auth_hash_nist_gmac_aes_256; break; default: CRYPTDEB("invalid GMAC key length"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; case CRYPTO_AES_CCM_CBC_MAC: switch (sop->mackeylen) { case 16: thash = &auth_hash_ccm_cbc_mac_128; break; case 24: thash = &auth_hash_ccm_cbc_mac_192; break; case 32: thash = &auth_hash_ccm_cbc_mac_256; break; default: CRYPTDEB("Invalid CBC MAC key size %d", sop->keylen); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; case CRYPTO_SHA1: thash = &auth_hash_sha1; break; case CRYPTO_SHA2_224: thash = &auth_hash_sha2_224; break; case CRYPTO_SHA2_256: thash = &auth_hash_sha2_256; break; case CRYPTO_SHA2_384: thash = &auth_hash_sha2_384; break; case CRYPTO_SHA2_512: thash = &auth_hash_sha2_512; break; case CRYPTO_NULL_HMAC: thash = &auth_hash_null; break; case CRYPTO_BLAKE2B: thash = &auth_hash_blake2b; break; case CRYPTO_BLAKE2S: thash = &auth_hash_blake2s; break; default: CRYPTDEB("invalid mac"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } if (txform == NULL && thash == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } memset(&csp, 0, sizeof(csp)); if (use_outputbuffers) csp.csp_flags |= CSP_F_SEPARATE_OUTPUT; if (sop->cipher == CRYPTO_AES_NIST_GCM_16) { switch (sop->mac) { #ifdef COMPAT_FREEBSD12 case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: if (sop->keylen != sop->mackeylen) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; #endif case 0: break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } csp.csp_mode = CSP_MODE_AEAD; } else if (sop->cipher == CRYPTO_AES_CCM_16) { switch (sop->mac) { #ifdef COMPAT_FREEBSD12 case CRYPTO_AES_CCM_CBC_MAC: if (sop->keylen != sop->mackeylen) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } thash = NULL; break; #endif case 0: break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } csp.csp_mode = CSP_MODE_AEAD; } else if (txform && thash) csp.csp_mode = CSP_MODE_ETA; else if (txform) csp.csp_mode = CSP_MODE_CIPHER; else csp.csp_mode = CSP_MODE_DIGEST; switch (csp.csp_mode) { case 
CSP_MODE_AEAD: case CSP_MODE_ETA: if (use_separate_aad) csp.csp_flags |= CSP_F_SEPARATE_AAD; break; } if (txform) { csp.csp_cipher_alg = txform->type; csp.csp_cipher_klen = sop->keylen; if (sop->keylen > txform->maxkey || sop->keylen < txform->minkey) { CRYPTDEB("invalid cipher parameters"); error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } key = malloc(csp.csp_cipher_klen, M_XDATA, M_WAITOK); error = copyin(sop->key, key, csp.csp_cipher_klen); if (error) { CRYPTDEB("invalid key"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } csp.csp_cipher_key = key; csp.csp_ivlen = txform->ivsize; } if (thash) { csp.csp_auth_alg = thash->type; csp.csp_auth_klen = sop->mackeylen; if (sop->mackeylen > thash->keysize || sop->mackeylen < 0) { CRYPTDEB("invalid mac key length"); error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if (csp.csp_auth_klen) { mackey = malloc(csp.csp_auth_klen, M_XDATA, M_WAITOK); error = copyin(sop->mackey, mackey, csp.csp_auth_klen); if (error) { CRYPTDEB("invalid mac key"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } csp.csp_auth_key = mackey; } if (csp.csp_auth_alg == CRYPTO_AES_NIST_GMAC) csp.csp_ivlen = AES_GCM_IV_LEN; if (csp.csp_auth_alg == CRYPTO_AES_CCM_CBC_MAC) csp.csp_ivlen = AES_CCM_IV_LEN; } crid = sop->crid; error = checkforsoftware(&crid); if (error) { CRYPTDEB("checkforsoftware"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } error = crypto_newsession(&cses, &csp, crid); if (error) { CRYPTDEB("crypto_newsession"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } cse = csecreate(fcr, cses, &csp, txform, key, thash, mackey); if (cse == NULL) { crypto_freesession(cses); error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); CRYPTDEB("csecreate"); goto bail; } sop->ses = cse->ses; /* return hardware/driver id */ sop->crid = crypto_ses2hid(cse->cses); bail: if (error) { free(key, M_XDATA); free(mackey, M_XDATA); } if (cmd == CIOCGSESSION && error == 0) session2_op_to_op(sop, data); break; case CIOCFSESSION: - ses = *(u_int32_t *)data; + ses = *(uint32_t *)data; if (!csedelete(fcr, ses)) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; case CIOCCRYPT: cop = (struct crypt_op *)data; cse = csefind(fcr, cop->ses); if (cse == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } error = cryptodev_op(cse, cop, active_cred, td); csefree(cse); break; case CIOCKEY: case CIOCKEY2: if (ratecheck(&keywarn, &warninterval)) gone_in(14, "Asymmetric crypto operations via /dev/crypto"); if (!crypto_userasymcrypto) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EPERM); /* XXX compat? */ } kop = (struct crypt_kop *)data; if (cmd == CIOCKEY) { /* NB: crypto core enforces s/w driver use */ kop->crk_crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE; } mtx_lock(&Giant); error = cryptodev_key(kop); mtx_unlock(&Giant); break; case CIOCASYMFEAT: if (ratecheck(&featwarn, &warninterval)) gone_in(14, "Asymmetric crypto features via /dev/crypto"); if (!crypto_userasymcrypto) { /* * NB: if user asym crypto operations are * not permitted return "no algorithms" * so well-behaved applications will just * fallback to doing them in software. 
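* (Illustrative caller-side sketch, not part of this change: the word
* returned by CIOCASYMFEAT is a CRF_* capability bitmask, so a caller
* typically does
*
*	int feat;
*	if (ioctl(cfd, CIOCASYMFEAT, &feat) == 0 && (feat & CRF_MOD_EXP))
*		... submit modular exponentiation via CIOCKEY ...
*
* where cfd is the cloned descriptor; a zero mask sends it down its
* software path.)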
*/ *(int *)data = 0; } else { error = crypto_getfeat((int *)data); if (error) SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); } break; case CIOCFINDDEV: error = cryptodev_find((struct crypt_find_op *)data); break; case CIOCCRYPTAEAD: caead = (struct crypt_aead *)data; cse = csefind(fcr, caead->ses); if (cse == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } error = cryptodev_aead(cse, caead, active_cred, td); csefree(cse); break; default: error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); break; } #ifdef COMPAT_FREEBSD32 switch (cmd32) { case CIOCGSESSION32: if (error == 0) session_op_to_32(data, data32); break; case CIOCGSESSION232: if (error == 0) session2_op_to_32(data, data32); break; case CIOCCRYPT32: if (error == 0) crypt_op_to_32(data, data32); break; case CIOCCRYPTAEAD32: if (error == 0) crypt_aead_to_32(data, data32); break; case CIOCKEY32: case CIOCKEY232: crypt_kop_to_32(data, data32); break; } #endif return (error); } static int cryptodev_cb(struct cryptop *); static struct cryptop_data * cod_alloc(struct csession *cse, size_t aad_len, size_t len, struct thread *td) { struct cryptop_data *cod; cod = malloc(sizeof(struct cryptop_data), M_XDATA, M_WAITOK | M_ZERO); cod->cse = cse; if (crypto_get_params(cse->cses)->csp_flags & CSP_F_SEPARATE_AAD) { if (aad_len != 0) cod->aad = malloc(aad_len, M_XDATA, M_WAITOK); cod->buf = malloc(len, M_XDATA, M_WAITOK); } else cod->buf = malloc(aad_len + len, M_XDATA, M_WAITOK); if (crypto_get_params(cse->cses)->csp_flags & CSP_F_SEPARATE_OUTPUT) cod->obuf = malloc(len, M_XDATA, M_WAITOK); return (cod); } static void cod_free(struct cryptop_data *cod) { free(cod->aad, M_XDATA); free(cod->obuf, M_XDATA); free(cod->buf, M_XDATA); free(cod, M_XDATA); } static int cryptodev_op( struct csession *cse, struct crypt_op *cop, struct ucred *active_cred, struct thread *td) { struct cryptop_data *cod = NULL; struct cryptop *crp = NULL; int error; if (cop->len > 256*1024-4) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (E2BIG); } if (cse->txform) { if (cop->len == 0 || (cop->len % cse->txform->blocksize) != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } } if (cop->mac && cse->hashsize == 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } /* * The COP_F_CIPHER_FIRST flag predates explicit session * modes, but the only way it was used was for EtA so allow it * as long as it is consistent with EtA. 
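* (Concretely: EtA always ciphers before authenticating, so the check
* below rejects COP_F_CIPHER_FIRST for anything but COP_ENCRYPT.)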
*/ if (cop->flags & COP_F_CIPHER_FIRST) { if (cop->op != COP_ENCRYPT) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } } cod = cod_alloc(cse, 0, cop->len + cse->hashsize, td); crp = crypto_getreq(cse->cses, M_WAITOK); error = copyin(cop->src, cod->buf, cop->len); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } crp->crp_payload_start = 0; crp->crp_payload_length = cop->len; if (cse->hashsize) crp->crp_digest_start = cop->len; switch (cse->mode) { case CSP_MODE_COMPRESS: switch (cop->op) { case COP_ENCRYPT: crp->crp_op = CRYPTO_OP_COMPRESS; break; case COP_DECRYPT: crp->crp_op = CRYPTO_OP_DECOMPRESS; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } break; case CSP_MODE_CIPHER: switch (cop->op) { case COP_ENCRYPT: crp->crp_op = CRYPTO_OP_ENCRYPT; break; case COP_DECRYPT: crp->crp_op = CRYPTO_OP_DECRYPT; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } break; case CSP_MODE_DIGEST: switch (cop->op) { case 0: case COP_ENCRYPT: case COP_DECRYPT: crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST; if (cod->obuf != NULL) crp->crp_digest_start = 0; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } break; case CSP_MODE_ETA: switch (cop->op) { case COP_ENCRYPT: crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST; break; case COP_DECRYPT: crp->crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } crp->crp_flags = CRYPTO_F_CBIMM | (cop->flags & COP_F_BATCH); crypto_use_buf(crp, cod->buf, cop->len + cse->hashsize); if (cod->obuf) crypto_use_output_buf(crp, cod->obuf, cop->len + cse->hashsize); crp->crp_callback = cryptodev_cb; crp->crp_opaque = cod; if (cop->iv) { if (cse->ivsize == 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } error = copyin(cop->iv, crp->crp_iv, cse->ivsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } crp->crp_flags |= CRYPTO_F_IV_SEPARATE; } else if (cse->ivsize != 0) { crp->crp_iv_start = 0; crp->crp_payload_start += cse->ivsize; crp->crp_payload_length -= cse->ivsize; cop->dst += cse->ivsize; } if (cop->mac != NULL && crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { error = copyin(cop->mac, cod->buf + crp->crp_digest_start, cse->hashsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } again: /* * Let the dispatch run unlocked, then, interlock against the * callback before checking if the operation completed and going * to sleep. This insures drivers don't inherit our lock which * results in a lock order reversal between crypto_dispatch forced * entry and the crypto_done callback into us. */ error = crypto_dispatch(crp); if (error != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } mtx_lock(&cse->lock); while (!cod->done) mtx_sleep(cod, &cse->lock, PWAIT, "crydev", 0); mtx_unlock(&cse->lock); if (crp->crp_etype == EAGAIN) { crp->crp_etype = 0; crp->crp_flags &= ~CRYPTO_F_DONE; cod->done = false; goto again; } if (crp->crp_etype != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = crp->crp_etype; goto bail; } if (cop->dst != NULL) { error = copyout(cod->obuf != NULL ? 
cod->obuf : cod->buf + crp->crp_payload_start, cop->dst, crp->crp_payload_length); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } if (cop->mac != NULL && (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) == 0) { error = copyout((cod->obuf != NULL ? cod->obuf : cod->buf) + crp->crp_digest_start, cop->mac, cse->hashsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } bail: crypto_freereq(crp); cod_free(cod); return (error); } static int cryptodev_aead( struct csession *cse, struct crypt_aead *caead, struct ucred *active_cred, struct thread *td) { struct cryptop_data *cod = NULL; struct cryptop *crp = NULL; int error; if (caead->len > 256*1024-4 || caead->aadlen > 256*1024-4) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (E2BIG); } if (cse->txform == NULL || cse->hashsize == 0 || caead->tag == NULL || (caead->len % cse->txform->blocksize) != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } /* * The COP_F_CIPHER_FIRST flag predates explicit session * modes, but the only way it was used was for EtA so allow it * as long as it is consistent with EtA. */ if (caead->flags & COP_F_CIPHER_FIRST) { if (caead->op != COP_ENCRYPT) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } } cod = cod_alloc(cse, caead->aadlen, caead->len + cse->hashsize, td); crp = crypto_getreq(cse->cses, M_WAITOK); if (cod->aad != NULL) error = copyin(caead->aad, cod->aad, caead->aadlen); else error = copyin(caead->aad, cod->buf, caead->aadlen); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } crp->crp_aad = cod->aad; crp->crp_aad_start = 0; crp->crp_aad_length = caead->aadlen; if (cod->aad != NULL) crp->crp_payload_start = 0; else crp->crp_payload_start = caead->aadlen; error = copyin(caead->src, cod->buf + crp->crp_payload_start, caead->len); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } crp->crp_payload_length = caead->len; if (caead->op == COP_ENCRYPT && cod->obuf != NULL) crp->crp_digest_start = crp->crp_payload_output_start + caead->len; else crp->crp_digest_start = crp->crp_payload_start + caead->len; switch (cse->mode) { case CSP_MODE_AEAD: case CSP_MODE_ETA: switch (caead->op) { case COP_ENCRYPT: crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST; break; case COP_DECRYPT: crp->crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } crp->crp_flags = CRYPTO_F_CBIMM | (caead->flags & COP_F_BATCH); crypto_use_buf(crp, cod->buf, crp->crp_payload_start + caead->len + cse->hashsize); if (cod->obuf != NULL) crypto_use_output_buf(crp, cod->obuf, caead->len + cse->hashsize); crp->crp_callback = cryptodev_cb; crp->crp_opaque = cod; if (caead->iv) { /* * Permit a 16-byte IV for AES-XTS, but only use the * first 8 bytes as a block number. 
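* (AES_XTS_IV_LEN is 8, so an EtA caller handing in a full
* AES_BLOCK_LEN tweak is quietly trimmed to the leading 64-bit
* block number.)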
*/ if (cse->mode == CSP_MODE_ETA && caead->ivlen == AES_BLOCK_LEN && cse->ivsize == AES_XTS_IV_LEN) caead->ivlen = AES_XTS_IV_LEN; if (caead->ivlen != cse->ivsize) { error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } error = copyin(caead->iv, crp->crp_iv, cse->ivsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } crp->crp_flags |= CRYPTO_F_IV_SEPARATE; } else { crp->crp_iv_start = crp->crp_payload_start; crp->crp_payload_start += cse->ivsize; crp->crp_payload_length -= cse->ivsize; caead->dst += cse->ivsize; } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { error = copyin(caead->tag, cod->buf + crp->crp_digest_start, cse->hashsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } again: /* * Let the dispatch run unlocked, then, interlock against the * callback before checking if the operation completed and going * to sleep. This insures drivers don't inherit our lock which * results in a lock order reversal between crypto_dispatch forced * entry and the crypto_done callback into us. */ error = crypto_dispatch(crp); if (error != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } mtx_lock(&cse->lock); while (!cod->done) mtx_sleep(cod, &cse->lock, PWAIT, "crydev", 0); mtx_unlock(&cse->lock); if (crp->crp_etype == EAGAIN) { crp->crp_etype = 0; crp->crp_flags &= ~CRYPTO_F_DONE; cod->done = false; goto again; } if (crp->crp_etype != 0) { error = crp->crp_etype; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if (caead->dst != NULL) { error = copyout(cod->obuf != NULL ? cod->obuf : cod->buf + crp->crp_payload_start, caead->dst, crp->crp_payload_length); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) == 0) { error = copyout((cod->obuf != NULL ? cod->obuf : cod->buf) + crp->crp_digest_start, caead->tag, cse->hashsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } bail: crypto_freereq(crp); cod_free(cod); return (error); } static int cryptodev_cb(struct cryptop *crp) { struct cryptop_data *cod = crp->crp_opaque; /* * Lock to ensure the wakeup() is not missed by the loops * waiting on cod->done in cryptodev_op() and * cryptodev_aead(). 
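* Without the lock, the wakeup() could fire between a waiter's
* "!cod->done" test and its mtx_sleep() and be lost, leaving the
* ioctl asleep forever.
*/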
*/ mtx_lock(&cod->cse->lock); cod->done = true; mtx_unlock(&cod->cse->lock); wakeup(cod); return (0); } static void cryptodevkey_cb(struct cryptkop *krp) { wakeup_one(krp); } static int cryptodev_key(struct crypt_kop *kop) { struct cryptkop *krp = NULL; int error = EINVAL; int in, out, size, i; if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EFBIG); } in = kop->crk_iparams; out = kop->crk_oparams; switch (kop->crk_op) { case CRK_MOD_EXP: if (in == 3 && out == 1) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); case CRK_MOD_EXP_CRT: if (in == 6 && out == 1) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); case CRK_DSA_SIGN: if (in == 5 && out == 2) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); case CRK_DSA_VERIFY: if (in == 7 && out == 0) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); case CRK_DH_COMPUTE_KEY: if (in == 3 && out == 1) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } krp = malloc(sizeof(*krp), M_XDATA, M_WAITOK | M_ZERO); krp->krp_op = kop->crk_op; krp->krp_status = kop->crk_status; krp->krp_iparams = kop->crk_iparams; krp->krp_oparams = kop->crk_oparams; krp->krp_crid = kop->crk_crid; krp->krp_status = 0; krp->krp_callback = cryptodevkey_cb; for (i = 0; i < CRK_MAXPARAM; i++) { if (kop->crk_param[i].crp_nbits > 65536) { /* Limit is the same as in OpenBSD */ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits; } for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) { size = (krp->krp_param[i].crp_nbits + 7) / 8; if (size == 0) continue; krp->krp_param[i].crp_p = malloc(size, M_XDATA, M_WAITOK); if (i >= krp->krp_iparams) continue; error = copyin(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p, size); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } } error = crypto_kdispatch(krp); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } error = tsleep(krp, PSOCK, "crydev", 0); if (error) { /* XXX can this happen? if so, how do we recover? 
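* (Seemingly not: with timo == 0 and PCATCH unset, tsleep() has no
* error to report, so this check is purely defensive.)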
*/ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } kop->crk_crid = krp->krp_hid; /* device that did the work */ if (krp->krp_status != 0) { error = krp->krp_status; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) { size = (krp->krp_param[i].crp_nbits + 7) / 8; if (size == 0) continue; error = copyout(krp->krp_param[i].crp_p, kop->crk_param[i].crp_p, size); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } } fail: if (krp) { kop->crk_status = krp->krp_status; for (i = 0; i < CRK_MAXPARAM; i++) { if (krp->krp_param[i].crp_p) free(krp->krp_param[i].crp_p, M_XDATA); } free(krp, M_XDATA); } return (error); } static int cryptodev_find(struct crypt_find_op *find) { device_t dev; size_t fnlen = sizeof find->name; if (find->crid != -1) { dev = crypto_find_device_byhid(find->crid); if (dev == NULL) return (ENOENT); strncpy(find->name, device_get_nameunit(dev), fnlen); find->name[fnlen - 1] = '\x0'; } else { find->name[fnlen - 1] = '\x0'; find->crid = crypto_find_driver(find->name); if (find->crid == -1) return (ENOENT); } return (0); } /* ARGSUSED */ static int cryptof_stat( struct file *fp, struct stat *sb, struct ucred *active_cred, struct thread *td) { return (EOPNOTSUPP); } /* ARGSUSED */ static int cryptof_close(struct file *fp, struct thread *td) { struct fcrypt *fcr = fp->f_data; struct csession *cse; while ((cse = TAILQ_FIRST(&fcr->csessions))) { TAILQ_REMOVE(&fcr->csessions, cse, next); KASSERT(cse->refs == 1, ("%s: crypto session %p with %d refs", __func__, cse, cse->refs)); csefree(cse); } free(fcr, M_XDATA); fp->f_data = NULL; return 0; } static int cryptof_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { kif->kf_type = KF_TYPE_CRYPTO; return (0); } static struct csession * csefind(struct fcrypt *fcr, u_int ses) { struct csession *cse; mtx_lock(&fcr->lock); TAILQ_FOREACH(cse, &fcr->csessions, next) { if (cse->ses == ses) { refcount_acquire(&cse->refs); mtx_unlock(&fcr->lock); return (cse); } } mtx_unlock(&fcr->lock); return (NULL); } static bool csedelete(struct fcrypt *fcr, u_int ses) { struct csession *cse; mtx_lock(&fcr->lock); TAILQ_FOREACH(cse, &fcr->csessions, next) { if (cse->ses == ses) { TAILQ_REMOVE(&fcr->csessions, cse, next); mtx_unlock(&fcr->lock); csefree(cse); return (true); } } mtx_unlock(&fcr->lock); return (false); } struct csession * csecreate(struct fcrypt *fcr, crypto_session_t cses, struct crypto_session_params *csp, struct enc_xform *txform, void *key, struct auth_hash *thash, void *mackey) { struct csession *cse; cse = malloc(sizeof(struct csession), M_XDATA, M_NOWAIT | M_ZERO); if (cse == NULL) return NULL; mtx_init(&cse->lock, "cryptodev", "crypto session lock", MTX_DEF); refcount_init(&cse->refs, 1); cse->key = key; cse->mackey = mackey; cse->mode = csp->csp_mode; cse->cses = cses; cse->txform = txform; if (thash != NULL) cse->hashsize = thash->hashsize; else if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) cse->hashsize = AES_GMAC_HASH_LEN; else if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16) cse->hashsize = AES_CBC_MAC_HASH_LEN; cse->ivsize = csp->csp_ivlen; mtx_lock(&fcr->lock); TAILQ_INSERT_TAIL(&fcr->csessions, cse, next); cse->ses = fcr->sesn++; mtx_unlock(&fcr->lock); return (cse); } static void csefree(struct csession *cse) { if (!refcount_release(&cse->refs)) return; crypto_freesession(cse->cses); mtx_destroy(&cse->lock); if (cse->key) free(cse->key, M_XDATA); if (cse->mackey) 
free(cse->mackey, M_XDATA); free(cse, M_XDATA); } static int cryptoioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) { struct file *f; struct fcrypt *fcr; int fd, error; switch (cmd) { case CRIOGET: error = falloc_noinstall(td, &f); if (error) break; fcr = malloc(sizeof(struct fcrypt), M_XDATA, M_WAITOK | M_ZERO); TAILQ_INIT(&fcr->csessions); mtx_init(&fcr->lock, "fcrypt", NULL, MTX_DEF); finit(f, FREAD | FWRITE, DTYPE_CRYPTO, fcr, &cryptofops); error = finstall(td, f, &fd, 0, NULL); if (error) { mtx_destroy(&fcr->lock); free(fcr, M_XDATA); } else *(uint32_t *)data = fd; fdrop(f, td); break; case CRIOFINDDEV: error = cryptodev_find((struct crypt_find_op *)data); break; case CRIOASYMFEAT: error = crypto_getfeat((int *)data); break; default: error = EINVAL; break; } return (error); } static struct cdevsw crypto_cdevsw = { .d_version = D_VERSION, .d_ioctl = cryptoioctl, .d_name = "crypto", }; static struct cdev *crypto_dev; /* * Initialization code, both for static and dynamic loading. */ static int cryptodev_modevent(module_t mod, int type, void *unused) { switch (type) { case MOD_LOAD: if (bootverbose) printf("crypto: \n"); crypto_dev = make_dev(&crypto_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "crypto"); return 0; case MOD_UNLOAD: /*XXX disallow if active sessions */ destroy_dev(crypto_dev); return 0; } return EINVAL; } static moduledata_t cryptodev_mod = { "cryptodev", cryptodev_modevent, 0 }; MODULE_VERSION(cryptodev, 1); DECLARE_MODULE(cryptodev, cryptodev_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); MODULE_DEPEND(cryptodev, crypto, 1, 1, 1); MODULE_DEPEND(cryptodev, zlib, 1, 1, 1); Index: head/sys/opencrypto/cryptodev.h =================================================================== --- head/sys/opencrypto/cryptodev.h (revision 367308) +++ head/sys/opencrypto/cryptodev.h (revision 367309) @@ -1,737 +1,737 @@ /* $FreeBSD$ */ /* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000 Angelos D. Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. * * Copyright (c) 2001 Theo de Raadt * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. * */ #ifndef _CRYPTO_CRYPTO_H_ #define _CRYPTO_CRYPTO_H_ #include #ifdef _KERNEL #include #include #endif /* Some initial values */ #define CRYPTO_DRIVERS_INITIAL 4 /* Hash values */ #define NULL_HASH_LEN 16 #define SHA1_HASH_LEN 20 #define RIPEMD160_HASH_LEN 20 #define SHA2_224_HASH_LEN 28 #define SHA2_256_HASH_LEN 32 #define SHA2_384_HASH_LEN 48 #define SHA2_512_HASH_LEN 64 #define AES_GMAC_HASH_LEN 16 #define POLY1305_HASH_LEN 16 #define AES_CBC_MAC_HASH_LEN 16 /* Maximum hash algorithm result length */ #define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */ #define SHA1_BLOCK_LEN 64 #define RIPEMD160_BLOCK_LEN 64 #define SHA2_224_BLOCK_LEN 64 #define SHA2_256_BLOCK_LEN 64 #define SHA2_384_BLOCK_LEN 128 #define SHA2_512_BLOCK_LEN 128 /* HMAC values */ #define NULL_HMAC_BLOCK_LEN 64 /* Maximum HMAC block length */ #define HMAC_MAX_BLOCK_LEN SHA2_512_BLOCK_LEN /* Keep this updated */ #define HMAC_IPAD_VAL 0x36 #define HMAC_OPAD_VAL 0x5C /* HMAC Key Length */ #define AES_128_GMAC_KEY_LEN 16 #define AES_192_GMAC_KEY_LEN 24 #define AES_256_GMAC_KEY_LEN 32 #define AES_128_CBC_MAC_KEY_LEN 16 #define AES_192_CBC_MAC_KEY_LEN 24 #define AES_256_CBC_MAC_KEY_LEN 32 #define POLY1305_KEY_LEN 32 /* Encryption algorithm block sizes */ #define NULL_BLOCK_LEN 4 /* IPsec to maintain alignment */ #define RIJNDAEL128_BLOCK_LEN 16 #define AES_BLOCK_LEN 16 #define AES_ICM_BLOCK_LEN 1 #define CAMELLIA_BLOCK_LEN 16 #define CHACHA20_NATIVE_BLOCK_LEN 64 #define EALG_MAX_BLOCK_LEN CHACHA20_NATIVE_BLOCK_LEN /* Keep this updated */ /* IV Lengths */ #define AES_GCM_IV_LEN 12 #define AES_CCM_IV_LEN 12 #define AES_XTS_IV_LEN 8 #define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */ /* Min and Max Encryption Key Sizes */ #define NULL_MIN_KEY 0 #define NULL_MAX_KEY 256 /* 2048 bits, max key */ #define RIJNDAEL_MIN_KEY 16 #define RIJNDAEL_MAX_KEY 32 #define AES_MIN_KEY RIJNDAEL_MIN_KEY #define AES_MAX_KEY RIJNDAEL_MAX_KEY #define AES_XTS_MIN_KEY (2 * AES_MIN_KEY) #define AES_XTS_MAX_KEY (2 * AES_MAX_KEY) #define CAMELLIA_MIN_KEY 16 #define CAMELLIA_MAX_KEY 32 /* Maximum hash algorithm result length */ #define AALG_MAX_RESULT_LEN 64 /* Keep this updated */ #define CRYPTO_ALGORITHM_MIN 1 #define CRYPTO_DES_CBC 1 #define CRYPTO_3DES_CBC 2 #define 
CRYPTO_BLF_CBC 3 #define CRYPTO_CAST_CBC 4 #define CRYPTO_SKIPJACK_CBC 5 #define CRYPTO_MD5_HMAC 6 #define CRYPTO_SHA1_HMAC 7 #define CRYPTO_RIPEMD160_HMAC 8 #define CRYPTO_MD5_KPDK 9 #define CRYPTO_SHA1_KPDK 10 #define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */ #define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */ #define CRYPTO_ARC4 12 #define CRYPTO_MD5 13 #define CRYPTO_SHA1 14 #define CRYPTO_NULL_HMAC 15 #define CRYPTO_NULL_CBC 16 #define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */ #define CRYPTO_SHA2_256_HMAC 18 #define CRYPTO_SHA2_384_HMAC 19 #define CRYPTO_SHA2_512_HMAC 20 #define CRYPTO_CAMELLIA_CBC 21 #define CRYPTO_AES_XTS 22 #define CRYPTO_AES_ICM 23 /* commonly known as CTR mode */ #define CRYPTO_AES_NIST_GMAC 24 /* GMAC only */ #define CRYPTO_AES_NIST_GCM_16 25 /* 16 byte ICV */ #ifdef _KERNEL #define CRYPTO_AES_128_NIST_GMAC 26 /* auth side */ #define CRYPTO_AES_192_NIST_GMAC 27 /* auth side */ #define CRYPTO_AES_256_NIST_GMAC 28 /* auth side */ #endif #define CRYPTO_BLAKE2B 29 /* Blake2b hash */ #define CRYPTO_BLAKE2S 30 /* Blake2s hash */ #define CRYPTO_CHACHA20 31 /* Chacha20 stream cipher */ #define CRYPTO_SHA2_224_HMAC 32 #define CRYPTO_RIPEMD160 33 #define CRYPTO_SHA2_224 34 #define CRYPTO_SHA2_256 35 #define CRYPTO_SHA2_384 36 #define CRYPTO_SHA2_512 37 #define CRYPTO_POLY1305 38 #define CRYPTO_AES_CCM_CBC_MAC 39 /* auth side */ #define CRYPTO_AES_CCM_16 40 /* cipher side */ #define CRYPTO_ALGORITHM_MAX 40 /* Keep updated - see below */ #define CRYPTO_ALGO_VALID(x) ((x) >= CRYPTO_ALGORITHM_MIN && \ (x) <= CRYPTO_ALGORITHM_MAX) /* Algorithm flags */ #define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */ #define CRYPTO_ALG_FLAG_RNG_ENABLE 0x02 /* Has HW RNG for DH/DSA */ #define CRYPTO_ALG_FLAG_DSA_SHA 0x04 /* Can do SHA on msg */ /* * Crypto driver/device flags. They can set in the crid * parameter when creating a session or submitting a key * op to affect the device/driver assigned. If neither * of these are specified then the crid is assumed to hold * the driver id of an existing (and suitable) device that * must be used to satisfy the request. */ #define CRYPTO_FLAG_HARDWARE 0x01000000 /* hardware accelerated */ #define CRYPTO_FLAG_SOFTWARE 0x02000000 /* software implementation */ /* Does the kernel support vmpage buffers on this platform? */ #ifdef __powerpc__ #define CRYPTO_MAY_HAVE_VMPAGE 1 #else #define CRYPTO_MAY_HAVE_VMPAGE ( PMAP_HAS_DMAP ) #endif /* Does the currently running system support vmpage buffers on this platform? */ #define CRYPTO_HAS_VMPAGE ( PMAP_HAS_DMAP ) /* NB: deprecated */ struct session_op { - u_int32_t cipher; /* ie. CRYPTO_AES_CBC */ - u_int32_t mac; /* ie. CRYPTO_SHA2_256_HMAC */ + uint32_t cipher; /* ie. CRYPTO_AES_CBC */ + uint32_t mac; /* ie. CRYPTO_SHA2_256_HMAC */ - u_int32_t keylen; /* cipher key */ + uint32_t keylen; /* cipher key */ c_caddr_t key; int mackeylen; /* mac key */ c_caddr_t mackey; - u_int32_t ses; /* returns: session # */ + uint32_t ses; /* returns: session # */ }; /* * session and crypt _op structs are used by userspace programs to interact * with /dev/crypto. Confusingly, the internal kernel interface is named * "cryptop" (no underscore). */ struct session2_op { - u_int32_t cipher; /* ie. CRYPTO_AES_CBC */ - u_int32_t mac; /* ie. CRYPTO_SHA2_256_HMAC */ + uint32_t cipher; /* ie. CRYPTO_AES_CBC */ + uint32_t mac; /* ie. 
CRYPTO_SHA2_256_HMAC */ - u_int32_t keylen; /* cipher key */ + uint32_t keylen; /* cipher key */ c_caddr_t key; int mackeylen; /* mac key */ c_caddr_t mackey; - u_int32_t ses; /* returns: session # */ + uint32_t ses; /* returns: session # */ int crid; /* driver id + flags (rw) */ int pad[4]; /* for future expansion */ }; struct crypt_op { - u_int32_t ses; - u_int16_t op; /* i.e. COP_ENCRYPT */ + uint32_t ses; + uint16_t op; /* i.e. COP_ENCRYPT */ #define COP_ENCRYPT 1 #define COP_DECRYPT 2 - u_int16_t flags; + uint16_t flags; #define COP_F_CIPHER_FIRST 0x0001 /* Cipher before MAC. */ #define COP_F_BATCH 0x0008 /* Batch op if possible */ u_int len; c_caddr_t src; /* become iov[] inside kernel */ caddr_t dst; caddr_t mac; /* must be big enough for chosen MAC */ c_caddr_t iv; }; /* op and flags the same as crypt_op */ struct crypt_aead { - u_int32_t ses; - u_int16_t op; /* i.e. COP_ENCRYPT */ - u_int16_t flags; + uint32_t ses; + uint16_t op; /* i.e. COP_ENCRYPT */ + uint16_t flags; u_int len; u_int aadlen; u_int ivlen; c_caddr_t src; /* become iov[] inside kernel */ caddr_t dst; c_caddr_t aad; /* additional authenticated data */ caddr_t tag; /* must fit for chosen TAG length */ c_caddr_t iv; }; /* * Parameters for looking up a crypto driver/device by * device name or by id. The latter are returned for * created sessions (crid) and completed key operations. */ struct crypt_find_op { int crid; /* driver id + flags */ char name[32]; /* device/driver name */ }; /* bignum parameter, in packed bytes, ... */ struct crparam { caddr_t crp_p; u_int crp_nbits; }; #define CRK_MAXPARAM 8 struct crypt_kop { u_int crk_op; /* ie. CRK_MOD_EXP or other */ u_int crk_status; /* return status */ u_short crk_iparams; /* # of input parameters */ u_short crk_oparams; /* # of output parameters */ u_int crk_crid; /* NB: only used by CIOCKEY2 (rw) */ struct crparam crk_param[CRK_MAXPARAM]; }; #define CRK_ALGORITM_MIN 0 #define CRK_MOD_EXP 0 #define CRK_MOD_EXP_CRT 1 #define CRK_DSA_SIGN 2 #define CRK_DSA_VERIFY 3 #define CRK_DH_COMPUTE_KEY 4 #define CRK_ALGORITHM_MAX 4 /* Keep updated - see below */ #define CRF_MOD_EXP (1 << CRK_MOD_EXP) #define CRF_MOD_EXP_CRT (1 << CRK_MOD_EXP_CRT) #define CRF_DSA_SIGN (1 << CRK_DSA_SIGN) #define CRF_DSA_VERIFY (1 << CRK_DSA_VERIFY) #define CRF_DH_COMPUTE_KEY (1 << CRK_DH_COMPUTE_KEY) /* * done against open of /dev/crypto, to get a cloned descriptor. * Please use F_SETFD against the cloned descriptor. 
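* A minimal userland sketch (illustrative only, not part of this
* change):
*
*	int fd = open("/dev/crypto", O_RDWR);
*	int cfd;
*	ioctl(fd, CRIOGET, &cfd);
*	fcntl(cfd, F_SETFD, FD_CLOEXEC);
*
* after which the CIOC* ioctls below are issued against cfd.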
*/ -#define CRIOGET _IOWR('c', 100, u_int32_t) +#define CRIOGET _IOWR('c', 100, uint32_t) #define CRIOASYMFEAT CIOCASYMFEAT #define CRIOFINDDEV CIOCFINDDEV /* the following are done against the cloned descriptor */ #define CIOCGSESSION _IOWR('c', 101, struct session_op) -#define CIOCFSESSION _IOW('c', 102, u_int32_t) +#define CIOCFSESSION _IOW('c', 102, uint32_t) #define CIOCCRYPT _IOWR('c', 103, struct crypt_op) #define CIOCKEY _IOWR('c', 104, struct crypt_kop) -#define CIOCASYMFEAT _IOR('c', 105, u_int32_t) +#define CIOCASYMFEAT _IOR('c', 105, uint32_t) #define CIOCGSESSION2 _IOWR('c', 106, struct session2_op) #define CIOCKEY2 _IOWR('c', 107, struct crypt_kop) #define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op) #define CIOCCRYPTAEAD _IOWR('c', 109, struct crypt_aead) struct cryptostats { uint64_t cs_ops; /* symmetric crypto ops submitted */ uint64_t cs_errs; /* symmetric crypto ops that failed */ uint64_t cs_kops; /* asymmetric/key ops submitted */ uint64_t cs_kerrs; /* asymmetric/key ops that failed */ uint64_t cs_intrs; /* crypto swi thread activations */ uint64_t cs_rets; /* crypto return thread activations */ uint64_t cs_blocks; /* symmetric op driver block */ uint64_t cs_kblocks; /* symmetric op driver block */ }; #ifdef _KERNEL /* * Return values for cryptodev_probesession methods. */ #define CRYPTODEV_PROBE_HARDWARE (-100) #define CRYPTODEV_PROBE_ACCEL_SOFTWARE (-200) #define CRYPTODEV_PROBE_SOFTWARE (-500) #if 0 #define CRYPTDEB(s, ...) do { \ printf("%s:%d: " s "\n", __FILE__, __LINE__, ## __VA_ARGS__); \ } while (0) #else #define CRYPTDEB(...) do { } while (0) #endif struct crypto_session_params { int csp_mode; /* Type of operations to perform. */ #define CSP_MODE_NONE 0 #define CSP_MODE_COMPRESS 1 /* Compression/decompression. */ #define CSP_MODE_CIPHER 2 /* Encrypt/decrypt. */ #define CSP_MODE_DIGEST 3 /* Compute/verify digest. */ #define CSP_MODE_AEAD 4 /* Combined auth/encryption. */ #define CSP_MODE_ETA 5 /* IPsec style encrypt-then-auth */ int csp_flags; #define CSP_F_SEPARATE_OUTPUT 0x0001 /* Requests can use separate output */ #define CSP_F_SEPARATE_AAD 0x0002 /* Requests can use separate AAD */ #define CSP_F_ESN 0x0004 /* Requests can use separate ESN field */ int csp_ivlen; /* IV length in bytes. */ int csp_cipher_alg; int csp_cipher_klen; /* Key length in bytes. */ const void *csp_cipher_key; int csp_auth_alg; int csp_auth_klen; /* Key length in bytes. */ const void *csp_auth_key; int csp_auth_mlen; /* Number of digest bytes to use. 0 means all. */ }; enum crypto_buffer_type { CRYPTO_BUF_NONE = 0, CRYPTO_BUF_CONTIG, CRYPTO_BUF_UIO, CRYPTO_BUF_MBUF, CRYPTO_BUF_VMPAGE, CRYPTO_BUF_LAST = CRYPTO_BUF_VMPAGE }; /* * Description of a data buffer for a request. Requests can either * have a single buffer that is modified in place or separate input * and output buffers. */ struct crypto_buffer { union { struct { char *cb_buf; int cb_buf_len; }; struct mbuf *cb_mbuf; struct { vm_page_t *cb_vm_page; int cb_vm_page_len; int cb_vm_page_offset; }; struct uio *cb_uio; }; enum crypto_buffer_type cb_type; }; /* * A cursor is used to iterate through a crypto request data buffer. 
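* The usual walk, as in cryptosoft.c's swcr_* routines (sketch):
*
*	crypto_cursor_init(&cc, &crp->crp_buf);
*	crypto_cursor_advance(&cc, crp->crp_payload_start);
*	for (resid = length; resid > 0; resid -= len) {
*		len = MIN(resid, crypto_cursor_seglen(&cc));
*		... process len bytes at crypto_cursor_segbase(&cc) ...
*		crypto_cursor_advance(&cc, len);
*	}
*
* with crypto_cursor_copydata() as the fallback when a cipher block
* straddles a segment boundary.
*/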
*/ struct crypto_buffer_cursor { union { char *cc_buf; struct mbuf *cc_mbuf; struct iovec *cc_iov; vm_page_t *cc_vmpage; }; /* Optional bytes of valid data remaining */ int cc_buf_len; /* * Optional offset within the current buffer segment where * valid data begins */ size_t cc_offset; enum crypto_buffer_type cc_type; }; /* Structure describing complete operation */ struct cryptop { TAILQ_ENTRY(cryptop) crp_next; struct task crp_task; crypto_session_t crp_session; /* Session */ int crp_olen; /* Result total length */ int crp_etype; /* * Error type (zero means no error). * All error codes except EAGAIN * indicate possible data corruption (as in, * the data have been touched). On all * errors, the crp_session may have changed * (reset to a new one), so the caller * should always check and use the new * value on future requests. */ int crp_flags; #define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */ #define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */ #define CRYPTO_F_DONE 0x0020 /* Operation completed */ #define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */ #define CRYPTO_F_ASYNC 0x0080 /* Dispatch crypto jobs on several threads * if op is synchronous */ #define CRYPTO_F_ASYNC_KEEPORDER 0x0100 /* * Dispatch the crypto jobs in the same * order there are submitted. Applied only * if CRYPTO_F_ASYNC flags is set */ #define CRYPTO_F_IV_SEPARATE 0x0200 /* Use crp_iv[] as IV. */ int crp_op; struct crypto_buffer crp_buf; struct crypto_buffer crp_obuf; void *crp_aad; /* AAD buffer. */ int crp_aad_start; /* Location of AAD. */ int crp_aad_length; /* 0 => no AAD. */ uint8_t crp_esn[4]; /* high-order ESN */ int crp_iv_start; /* Location of IV. IV length is from * the session. */ int crp_payload_start; /* Location of ciphertext. */ int crp_payload_output_start; int crp_payload_length; int crp_digest_start; /* Location of MAC/tag. Length is * from the session. */ uint8_t crp_iv[EALG_MAX_BLOCK_LEN]; /* IV if IV_SEPARATE. */ const void *crp_cipher_key; /* New cipher key if non-NULL. */ const void *crp_auth_key; /* New auth key if non-NULL. 
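* (A non-NULL pointer here supplies a replacement key for the
* session; cryptosoft's swcr_encdec()/swcr_authcompute() re-run
* setkey()/swcr_authprepare() with it before processing.)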
*/ void *crp_opaque; /* Opaque pointer, passed along */ int (*crp_callback)(struct cryptop *); /* Callback function */ struct bintime crp_tstamp; /* performance time stamp */ uint32_t crp_seq; /* used for ordered dispatch */ uint32_t crp_retw_id; /* * the return worker to be used, * used for ordered dispatch */ }; static __inline void _crypto_use_buf(struct crypto_buffer *cb, void *buf, int len) { cb->cb_buf = buf; cb->cb_buf_len = len; cb->cb_type = CRYPTO_BUF_CONTIG; } static __inline void _crypto_use_mbuf(struct crypto_buffer *cb, struct mbuf *m) { cb->cb_mbuf = m; cb->cb_type = CRYPTO_BUF_MBUF; } static __inline void _crypto_use_vmpage(struct crypto_buffer *cb, vm_page_t *pages, int len, int offset) { cb->cb_vm_page = pages; cb->cb_vm_page_len = len; cb->cb_vm_page_offset = offset; cb->cb_type = CRYPTO_BUF_VMPAGE; } static __inline void _crypto_use_uio(struct crypto_buffer *cb, struct uio *uio) { cb->cb_uio = uio; cb->cb_type = CRYPTO_BUF_UIO; } static __inline void crypto_use_buf(struct cryptop *crp, void *buf, int len) { _crypto_use_buf(&crp->crp_buf, buf, len); } static __inline void crypto_use_mbuf(struct cryptop *crp, struct mbuf *m) { _crypto_use_mbuf(&crp->crp_buf, m); } static __inline void crypto_use_vmpage(struct cryptop *crp, vm_page_t *pages, int len, int offset) { _crypto_use_vmpage(&crp->crp_buf, pages, len, offset); } static __inline void crypto_use_uio(struct cryptop *crp, struct uio *uio) { _crypto_use_uio(&crp->crp_buf, uio); } static __inline void crypto_use_output_buf(struct cryptop *crp, void *buf, int len) { _crypto_use_buf(&crp->crp_obuf, buf, len); } static __inline void crypto_use_output_mbuf(struct cryptop *crp, struct mbuf *m) { _crypto_use_mbuf(&crp->crp_obuf, m); } static __inline void crypto_use_output_vmpage(struct cryptop *crp, vm_page_t *pages, int len, int offset) { _crypto_use_vmpage(&crp->crp_obuf, pages, len, offset); } static __inline void crypto_use_output_uio(struct cryptop *crp, struct uio *uio) { _crypto_use_uio(&crp->crp_obuf, uio); } #define CRYPTOP_ASYNC(crp) \ (((crp)->crp_flags & CRYPTO_F_ASYNC) && \ crypto_ses2caps((crp)->crp_session) & CRYPTOCAP_F_SYNC) #define CRYPTOP_ASYNC_KEEPORDER(crp) \ (CRYPTOP_ASYNC(crp) && \ (crp)->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) #define CRYPTO_HAS_OUTPUT_BUFFER(crp) \ ((crp)->crp_obuf.cb_type != CRYPTO_BUF_NONE) /* Flags in crp_op. */ #define CRYPTO_OP_DECRYPT 0x0 #define CRYPTO_OP_ENCRYPT 0x1 #define CRYPTO_OP_IS_ENCRYPT(op) ((op) & CRYPTO_OP_ENCRYPT) #define CRYPTO_OP_COMPUTE_DIGEST 0x0 #define CRYPTO_OP_VERIFY_DIGEST 0x2 #define CRYPTO_OP_DECOMPRESS CRYPTO_OP_DECRYPT #define CRYPTO_OP_COMPRESS CRYPTO_OP_ENCRYPT #define CRYPTO_OP_IS_COMPRESS(op) ((op) & CRYPTO_OP_COMPRESS) /* * Hints passed to process methods. */ #define CRYPTO_HINT_MORE 0x1 /* more ops coming shortly */ struct cryptkop { TAILQ_ENTRY(cryptkop) krp_next; u_int krp_op; /* ie. CRK_MOD_EXP or other */ u_int krp_status; /* return status */ u_short krp_iparams; /* # of input parameters */ u_short krp_oparams; /* # of output parameters */ u_int krp_crid; /* desired device, etc. 
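* (either a specific driver id or the CRYPTOCAP_F_HARDWARE /
* CRYPTOCAP_F_SOFTWARE constraint flags described above)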
*/ uint32_t krp_hid; /* device used */ struct crparam krp_param[CRK_MAXPARAM]; /* kvm */ void (*krp_callback)(struct cryptkop *); struct cryptocap *krp_cap; }; uint32_t crypto_ses2hid(crypto_session_t crypto_session); uint32_t crypto_ses2caps(crypto_session_t crypto_session); void *crypto_get_driver_session(crypto_session_t crypto_session); const struct crypto_session_params *crypto_get_params( crypto_session_t crypto_session); struct auth_hash *crypto_auth_hash(const struct crypto_session_params *csp); struct enc_xform *crypto_cipher(const struct crypto_session_params *csp); MALLOC_DECLARE(M_CRYPTO_DATA); extern int crypto_newsession(crypto_session_t *cses, const struct crypto_session_params *params, int hard); extern void crypto_freesession(crypto_session_t cses); #define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE #define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE #define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */ #define CRYPTOCAP_F_ACCEL_SOFTWARE 0x08000000 extern int32_t crypto_get_driverid(device_t dev, size_t session_size, int flags); extern int crypto_find_driver(const char *); extern device_t crypto_find_device_byhid(int hid); extern int crypto_getcaps(int hid); -extern int crypto_kregister(u_int32_t, int, u_int32_t); -extern int crypto_unregister_all(u_int32_t driverid); +extern int crypto_kregister(uint32_t, int, uint32_t); +extern int crypto_unregister_all(uint32_t driverid); extern int crypto_dispatch(struct cryptop *crp); extern int crypto_kdispatch(struct cryptkop *); #define CRYPTO_SYMQ 0x1 #define CRYPTO_ASYMQ 0x2 -extern int crypto_unblock(u_int32_t, int); +extern int crypto_unblock(uint32_t, int); extern void crypto_done(struct cryptop *crp); extern void crypto_kdone(struct cryptkop *); extern int crypto_getfeat(int *); extern void crypto_destroyreq(struct cryptop *crp); extern void crypto_initreq(struct cryptop *crp, crypto_session_t cses); extern void crypto_freereq(struct cryptop *crp); extern struct cryptop *crypto_getreq(crypto_session_t cses, int how); extern int crypto_usercrypto; /* userland may do crypto requests */ extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */ extern int crypto_devallowsoft; /* only use hardware crypto */ #ifdef SYSCTL_DECL SYSCTL_DECL(_kern_crypto); #endif /* Helper routines for drivers to initialize auth contexts for HMAC. */ struct auth_hash; void hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen, void *auth_ctx); void hmac_init_opad(const struct auth_hash *axf, const char *key, int klen, void *auth_ctx); /* * Crypto-related utility routines used mainly by drivers. * * Similar to m_copyback/data, *_copyback copy data from the 'src' * buffer into the crypto request's data buffer while *_copydata copy * data from the crypto request's data buffer into the 'dst' * buffer. 
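* For instance, a driver publishing a computed tag does (sketch,
* mlen being its digest length):
*
*	crypto_copyback(crp, crp->crp_digest_start, mlen, tag);
*
* and fetches the expected tag for verification with the matching
* crypto_copydata() call.
*/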
*/ void crypto_copyback(struct cryptop *crp, int off, int size, const void *src); void crypto_copydata(struct cryptop *crp, int off, int size, void *dst); int crypto_apply(struct cryptop *crp, int off, int len, int (*f)(void *, const void *, u_int), void *arg); void *crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len); int crypto_apply_buf(struct crypto_buffer *cb, int off, int len, int (*f)(void *, const void *, u_int), void *arg); void *crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, size_t skip, size_t len); size_t crypto_buffer_len(struct crypto_buffer *cb); void crypto_cursor_init(struct crypto_buffer_cursor *cc, const struct crypto_buffer *cb); void crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount); void *crypto_cursor_segbase(struct crypto_buffer_cursor *cc); size_t crypto_cursor_seglen(struct crypto_buffer_cursor *cc); void crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size, const void *vsrc); void crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, void *vdst); void crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size, void *vdst); static __inline void crypto_read_iv(struct cryptop *crp, void *iv) { const struct crypto_session_params *csp; csp = crypto_get_params(crp->crp_session); if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) memcpy(iv, crp->crp_iv, csp->csp_ivlen); else crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv); } #endif /* _KERNEL */ #endif /* _CRYPTO_CRYPTO_H_ */ Index: head/sys/opencrypto/cryptosoft.c =================================================================== --- head/sys/opencrypto/cryptosoft.c (revision 367308) +++ head/sys/opencrypto/cryptosoft.c (revision 367309) @@ -1,1512 +1,1512 @@ /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. Keromytis * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" struct swcr_auth { void *sw_ictx; void *sw_octx; struct auth_hash *sw_axf; uint16_t sw_mlen; }; struct swcr_encdec { void *sw_kschedule; struct enc_xform *sw_exf; }; struct swcr_compdec { struct comp_algo *sw_cxf; }; struct swcr_session { struct mtx swcr_lock; int (*swcr_process)(struct swcr_session *, struct cryptop *); struct swcr_auth swcr_auth; struct swcr_encdec swcr_encdec; struct swcr_compdec swcr_compdec; }; static int32_t swcr_id; static void swcr_freesession(device_t dev, crypto_session_t cses); /* Used for CRYPTO_NULL_CBC. */ static int swcr_null(struct swcr_session *ses, struct cryptop *crp) { return (0); } /* * Apply a symmetric encryption/decryption algorithm. */ static int swcr_encdec(struct swcr_session *ses, struct cryptop *crp) { unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN]; unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN]; const struct crypto_session_params *csp; struct swcr_encdec *sw; struct enc_xform *exf; int i, blks, inlen, ivlen, outlen, resid; struct crypto_buffer_cursor cc_in, cc_out; const unsigned char *inblk; unsigned char *outblk; int error; bool encrypting; error = 0; sw = &ses->swcr_encdec; exf = sw->sw_exf; ivlen = exf->ivsize; if (exf->native_blocksize == 0) { /* Check for non-padded data */ if ((crp->crp_payload_length % exf->blocksize) != 0) return (EINVAL); blks = exf->blocksize; } else blks = exf->native_blocksize; if (exf == &enc_xform_aes_icm && (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); if (crp->crp_cipher_key != NULL) { csp = crypto_get_params(crp->crp_session); error = exf->setkey(sw->sw_kschedule, crp->crp_cipher_key, csp->csp_cipher_klen); if (error) return (error); } crypto_read_iv(crp, iv); if (exf->reinit) { /* * xforms that provide a reinit method perform all IV * handling themselves. */ exf->reinit(sw->sw_kschedule, iv); } ivp = iv; crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); inlen = crypto_cursor_seglen(&cc_in); inblk = crypto_cursor_segbase(&cc_in); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; outlen = crypto_cursor_seglen(&cc_out); outblk = crypto_cursor_segbase(&cc_out); resid = crp->crp_payload_length; encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op); /* * Loop through encrypting blocks. 'inlen' is the remaining * length of the current segment in the input buffer. * 'outlen' is the remaining length of current segment in the * output buffer. */ while (resid >= blks) { /* * If the current block is not contained within the * current input/output segment, use 'blk' as a local * buffer. */ if (inlen < blks) { crypto_cursor_copydata(&cc_in, blks, blk); inblk = blk; } if (outlen < blks) outblk = blk; /* * Ciphers without a 'reinit' hook are assumed to be * used in CBC mode where the chaining is done here. 
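* (That is, on encryption C[i] = E_k(P[i] ^ C[i-1]) with the IV as
* C[0]; decryption mirrors it, and the ivp/nivp shuffle below keeps
* the previous ciphertext block intact so in-place decryption works.)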
*/ if (exf->reinit != NULL) { if (encrypting) exf->encrypt(sw->sw_kschedule, inblk, outblk); else exf->decrypt(sw->sw_kschedule, inblk, outblk); } else if (encrypting) { /* XOR with previous block */ for (i = 0; i < blks; i++) outblk[i] = inblk[i] ^ ivp[i]; exf->encrypt(sw->sw_kschedule, outblk, outblk); /* * Keep encrypted block for XOR'ing * with next block */ memcpy(iv, outblk, blks); ivp = iv; } else { /* decrypt */ /* * Keep encrypted block for XOR'ing * with next block */ nivp = (ivp == iv) ? iv2 : iv; memcpy(nivp, inblk, blks); exf->decrypt(sw->sw_kschedule, inblk, outblk); /* XOR with previous block */ for (i = 0; i < blks; i++) outblk[i] ^= ivp[i]; ivp = nivp; } if (inlen < blks) { inlen = crypto_cursor_seglen(&cc_in); inblk = crypto_cursor_segbase(&cc_in); } else { crypto_cursor_advance(&cc_in, blks); inlen -= blks; inblk += blks; } if (outlen < blks) { crypto_cursor_copyback(&cc_out, blks, blk); outlen = crypto_cursor_seglen(&cc_out); outblk = crypto_cursor_segbase(&cc_out); } else { crypto_cursor_advance(&cc_out, blks); outlen -= blks; outblk += blks; } resid -= blks; } /* Handle trailing partial block for stream ciphers. */ if (resid > 0) { KASSERT(exf->native_blocksize != 0, ("%s: partial block of %d bytes for cipher %s", __func__, i, exf->name)); KASSERT(exf->reinit != NULL, ("%s: partial block cipher %s without reinit hook", __func__, exf->name)); KASSERT(resid < blks, ("%s: partial block too big", __func__)); inlen = crypto_cursor_seglen(&cc_in); outlen = crypto_cursor_seglen(&cc_out); if (inlen < resid) { crypto_cursor_copydata(&cc_in, resid, blk); inblk = blk; } else inblk = crypto_cursor_segbase(&cc_in); if (outlen < resid) outblk = blk; else outblk = crypto_cursor_segbase(&cc_out); if (encrypting) exf->encrypt_last(sw->sw_kschedule, inblk, outblk, resid); else exf->decrypt_last(sw->sw_kschedule, inblk, outblk, resid); if (outlen < resid) crypto_cursor_copyback(&cc_out, resid, blk); } explicit_bzero(blk, sizeof(blk)); explicit_bzero(iv, sizeof(iv)); explicit_bzero(iv2, sizeof(iv2)); return (0); } static void swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw, const uint8_t *key, int klen) { switch (axf->type) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: hmac_init_ipad(axf, key, klen, sw->sw_ictx); hmac_init_opad(axf, key, klen, sw->sw_octx); break; case CRYPTO_POLY1305: case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: axf->Setkey(sw->sw_ictx, key, klen); axf->Init(sw->sw_ictx); break; default: panic("%s: algorithm %d doesn't use keys", __func__, axf->type); } } /* * Compute or verify hash. 
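* For the HMAC transforms, sw_ictx/sw_octx hold midstates with the
* ipad/opad already absorbed (see hmac_init_ipad()/hmac_init_opad()),
* so the outer pass below is just an Update() of the inner digest
* followed by Final().
*/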
*/ static int swcr_authcompute(struct swcr_session *ses, struct cryptop *crp) { u_char aalg[HASH_MAX_LEN]; const struct crypto_session_params *csp; struct swcr_auth *sw; struct auth_hash *axf; union authctx ctx; int err; sw = &ses->swcr_auth; axf = sw->sw_axf; csp = crypto_get_params(crp->crp_session); if (crp->crp_auth_key != NULL) { swcr_authprepare(axf, sw, crp->crp_auth_key, csp->csp_auth_klen); } bcopy(sw->sw_ictx, &ctx, axf->ctxsize); if (crp->crp_aad != NULL) err = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length); else err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, axf->Update, &ctx); if (err) goto out; if (CRYPTO_HAS_OUTPUT_BUFFER(crp) && CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) err = crypto_apply_buf(&crp->crp_obuf, crp->crp_payload_output_start, crp->crp_payload_length, axf->Update, &ctx); else err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, axf->Update, &ctx); if (err) goto out; if (csp->csp_flags & CSP_F_ESN) axf->Update(&ctx, crp->crp_esn, 4); axf->Final(aalg, &ctx); if (sw->sw_octx != NULL) { bcopy(sw->sw_octx, &ctx, axf->ctxsize); axf->Update(&ctx, aalg, axf->hashsize); axf->Final(aalg, &ctx); } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { u_char uaalg[HASH_MAX_LEN]; crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg); if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0) err = EBADMSG; explicit_bzero(uaalg, sizeof(uaalg)); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg); } explicit_bzero(aalg, sizeof(aalg)); out: explicit_bzero(&ctx, sizeof(ctx)); return (err); } CTASSERT(INT_MAX <= (1ll<<39) - 256); /* GCM: plain text < 2^39-256 */ CTASSERT(INT_MAX <= (uint64_t)-1); /* GCM: associated data <= 2^64-1 */ static int swcr_gmac(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char tag[GMAC_DIGEST_LEN]; u_char iv[AES_BLOCK_LEN]; struct crypto_buffer_cursor cc; const u_char *inblk; union authctx ctx; struct swcr_auth *swa; struct auth_hash *axf; uint32_t *blkp; int blksz, error, ivlen, len, resid; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = GMAC_BLOCK_LEN; KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch", __func__)); /* Initialize the IV */ ivlen = AES_GCM_IV_LEN; crypto_read_iv(crp, iv); axf->Reinit(&ctx, iv, ivlen); crypto_cursor_init(&cc, &crp->crp_buf); crypto_cursor_advance(&cc, crp->crp_payload_start); for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) { len = crypto_cursor_seglen(&cc); if (len >= blksz) { inblk = crypto_cursor_segbase(&cc); len = rounddown(MIN(len, resid), blksz); crypto_cursor_advance(&cc, len); } else { len = blksz; crypto_cursor_copydata(&cc, len, blk); inblk = blk; } axf->Update(&ctx, inblk, len); } if (resid > 0) { memset(blk, 0, blksz); crypto_cursor_copydata(&cc, resid, blk); axf->Update(&ctx, blk, blksz); } /* length block */ memset(blk, 0, blksz); blkp = (uint32_t *)blk + 1; *blkp = htobe32(crp->crp_payload_length * 8); axf->Update(&ctx, blk, blksz); /* Finalize MAC */ axf->Final(tag, &ctx); error = 0; if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { u_char tag2[GMAC_DIGEST_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0) error = EBADMSG; explicit_bzero(tag2, sizeof(tag2)); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } 
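/* Scrub the stack copies of the length block, tag, and IV before returning. */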
explicit_bzero(blkbuf, sizeof(blkbuf)); explicit_bzero(tag, sizeof(tag)); explicit_bzero(iv, sizeof(iv)); return (error); } static int swcr_gcm(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char tag[GMAC_DIGEST_LEN]; u_char iv[AES_BLOCK_LEN]; struct crypto_buffer_cursor cc_in, cc_out; const u_char *inblk; u_char *outblk; union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; struct auth_hash *axf; struct enc_xform *exf; uint32_t *blkp; int blksz, error, ivlen, len, r, resid; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = GMAC_BLOCK_LEN; KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch", __func__)); swe = &ses->swcr_encdec; exf = swe->sw_exf; KASSERT(axf->blocksize == exf->native_blocksize, ("%s: blocksize mismatch", __func__)); if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); /* Initialize the IV */ ivlen = AES_GCM_IV_LEN; bcopy(crp->crp_iv, iv, ivlen); /* Supply MAC with IV */ axf->Reinit(&ctx, iv, ivlen); /* Supply MAC with AAD */ if (crp->crp_aad != NULL) { len = rounddown(crp->crp_aad_length, blksz); if (len != 0) axf->Update(&ctx, crp->crp_aad, len); if (crp->crp_aad_length != len) { memset(blk, 0, blksz); memcpy(blk, (char *)crp->crp_aad + len, crp->crp_aad_length - len); axf->Update(&ctx, blk, blksz); } } else { crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_aad_start); for (resid = crp->crp_aad_length; resid >= blksz; resid -= len) { len = crypto_cursor_seglen(&cc_in); if (len >= blksz) { inblk = crypto_cursor_segbase(&cc_in); len = rounddown(MIN(len, resid), blksz); crypto_cursor_advance(&cc_in, len); } else { len = blksz; crypto_cursor_copydata(&cc_in, len, blk); inblk = blk; } axf->Update(&ctx, inblk, len); } if (resid > 0) { memset(blk, 0, blksz); crypto_cursor_copydata(&cc_in, resid, blk); axf->Update(&ctx, blk, blksz); } } exf->reinit(swe->sw_kschedule, iv); /* Do encryption with MAC */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) { if (crypto_cursor_seglen(&cc_in) < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else { inblk = crypto_cursor_segbase(&cc_in); crypto_cursor_advance(&cc_in, blksz); } if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { if (crypto_cursor_seglen(&cc_out) < blksz) outblk = blk; else outblk = crypto_cursor_segbase(&cc_out); exf->encrypt(swe->sw_kschedule, inblk, outblk); axf->Update(&ctx, outblk, blksz); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } else { axf->Update(&ctx, inblk, blksz); } } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { exf->encrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } axf->Update(&ctx, blk, resid); } /* length block */ memset(blk, 0, blksz); blkp = (uint32_t *)blk + 1; *blkp = htobe32(crp->crp_aad_length * 8); blkp = (uint32_t *)blk + 3; *blkp = htobe32(crp->crp_payload_length * 8); axf->Update(&ctx, blk, blksz); /* Finalize MAC */ axf->Final(tag, &ctx); /* Validate tag */ error = 0; if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { u_char 
tag2[GMAC_DIGEST_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); r = timingsafe_bcmp(tag, tag2, swa->sw_mlen); explicit_bzero(tag2, sizeof(tag2)); if (r != 0) { error = EBADMSG; goto out; } /* tag matches, decrypt data */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); for (resid = crp->crp_payload_length; resid > blksz; resid -= blksz) { if (crypto_cursor_seglen(&cc_in) < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else { inblk = crypto_cursor_segbase(&cc_in); crypto_cursor_advance(&cc_in, blksz); } if (crypto_cursor_seglen(&cc_out) < blksz) outblk = blk; else outblk = crypto_cursor_segbase(&cc_out); exf->decrypt(swe->sw_kschedule, inblk, outblk); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); exf->decrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } out: explicit_bzero(blkbuf, sizeof(blkbuf)); explicit_bzero(tag, sizeof(tag)); explicit_bzero(iv, sizeof(iv)); return (error); } static int swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp) { u_char tag[AES_CBC_MAC_HASH_LEN]; u_char iv[AES_BLOCK_LEN]; union authctx ctx; struct swcr_auth *swa; struct auth_hash *axf; int error, ivlen; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); /* Initialize the IV */ ivlen = AES_CCM_IV_LEN; crypto_read_iv(crp, iv); /* * AES CCM-CBC-MAC needs to know the length of both the auth * data and payload data before doing the auth computation. */ ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length; ctx.aes_cbc_mac_ctx.cryptDataLength = 0; axf->Reinit(&ctx, iv, ivlen); if (crp->crp_aad != NULL) error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length); else error = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, axf->Update, &ctx); if (error) return (error); /* Finalize MAC */ axf->Final(tag, &ctx); if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { u_char tag2[AES_CBC_MAC_HASH_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0) error = EBADMSG; explicit_bzero(tag2, sizeof(tag)); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } explicit_bzero(tag, sizeof(tag)); explicit_bzero(iv, sizeof(iv)); return (error); } static int swcr_ccm(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char tag[AES_CBC_MAC_HASH_LEN]; u_char iv[AES_BLOCK_LEN]; struct crypto_buffer_cursor cc_in, cc_out; const u_char *inblk; u_char *outblk; union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; struct auth_hash *axf; struct enc_xform *exf; int blksz, error, ivlen, r, resid; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = AES_BLOCK_LEN; KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch", __func__)); swe = &ses->swcr_encdec; exf = swe->sw_exf; KASSERT(axf->blocksize == exf->native_blocksize, ("%s: blocksize mismatch", __func__)); if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); /* Initialize the IV */ ivlen = AES_CCM_IV_LEN; bcopy(crp->crp_iv, iv, ivlen); /* * AES CCM-CBC-MAC needs to know the 
length of both the auth * data and payload data before doing the auth computation. */ ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length; ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length; /* Supply MAC with IV */ axf->Reinit(&ctx, iv, ivlen); /* Supply MAC with AAD */ if (crp->crp_aad != NULL) error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length); else error = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, axf->Update, &ctx); if (error) return (error); exf->reinit(swe->sw_kschedule, iv); /* Do encryption/decryption with MAC */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) { if (crypto_cursor_seglen(&cc_in) < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else { inblk = crypto_cursor_segbase(&cc_in); crypto_cursor_advance(&cc_in, blksz); } if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { if (crypto_cursor_seglen(&cc_out) < blksz) outblk = blk; else outblk = crypto_cursor_segbase(&cc_out); axf->Update(&ctx, inblk, blksz); exf->encrypt(swe->sw_kschedule, inblk, outblk); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } else { /* * One of the problems with CCM+CBC is that * the authentication is done on the * unencrypted data. As a result, we have to * decrypt the data twice: once to generate * the tag and a second time after the tag is * verified. */ exf->decrypt(swe->sw_kschedule, inblk, blk); axf->Update(&ctx, blk, blksz); } } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { axf->Update(&ctx, blk, resid); exf->encrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } else { exf->decrypt_last(swe->sw_kschedule, blk, blk, resid); axf->Update(&ctx, blk, resid); } } /* Finalize MAC */ axf->Final(tag, &ctx); /* Validate tag */ error = 0; if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { u_char tag2[AES_CBC_MAC_HASH_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); r = timingsafe_bcmp(tag, tag2, swa->sw_mlen); explicit_bzero(tag2, sizeof(tag2)); if (r != 0) { error = EBADMSG; goto out; } /* tag matches, decrypt data */ exf->reinit(swe->sw_kschedule, iv); crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); for (resid = crp->crp_payload_length; resid > blksz; resid -= blksz) { if (crypto_cursor_seglen(&cc_in) < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else { inblk = crypto_cursor_segbase(&cc_in); crypto_cursor_advance(&cc_in, blksz); } if (crypto_cursor_seglen(&cc_out) < blksz) outblk = blk; else outblk = crypto_cursor_segbase(&cc_out); exf->decrypt(swe->sw_kschedule, inblk, outblk); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); exf->decrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } out: explicit_bzero(blkbuf, sizeof(blkbuf)); explicit_bzero(tag, sizeof(tag)); explicit_bzero(iv, sizeof(iv)); return (error); } /* * Apply a cipher and a digest to perform 
EtA. */ static int swcr_eta(struct swcr_session *ses, struct cryptop *crp) { int error; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { error = swcr_encdec(ses, crp); if (error == 0) error = swcr_authcompute(ses, crp); } else { error = swcr_authcompute(ses, crp); if (error == 0) error = swcr_encdec(ses, crp); } return (error); } /* * Apply a compression/decompression algorithm */ static int swcr_compdec(struct swcr_session *ses, struct cryptop *crp) { - u_int8_t *data, *out; + uint8_t *data, *out; struct comp_algo *cxf; int adj; - u_int32_t result; + uint32_t result; cxf = ses->swcr_compdec.sw_cxf; /* We must handle the whole buffer of data in one time * then if there is not all the data in the mbuf, we must * copy in a buffer. */ data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT); if (data == NULL) return (EINVAL); crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, data); if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) result = cxf->compress(data, crp->crp_payload_length, &out); else result = cxf->decompress(data, crp->crp_payload_length, &out); free(data, M_CRYPTO_DATA); if (result == 0) return (EINVAL); crp->crp_olen = result; /* Check the compressed size when doing compression */ if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) { if (result >= crp->crp_payload_length) { /* Compression was useless, we lost time */ free(out, M_CRYPTO_DATA); return (0); } } /* Copy back the (de)compressed data. m_copyback is * extending the mbuf as necessary. */ crypto_copyback(crp, crp->crp_payload_start, result, out); if (result < crp->crp_payload_length) { switch (crp->crp_buf.cb_type) { case CRYPTO_BUF_MBUF: adj = result - crp->crp_payload_length; m_adj(crp->crp_buf.cb_mbuf, adj); break; case CRYPTO_BUF_UIO: { struct uio *uio = crp->crp_buf.cb_uio; int ind; adj = crp->crp_payload_length - result; ind = uio->uio_iovcnt - 1; while (adj > 0 && ind >= 0) { if (adj < uio->uio_iov[ind].iov_len) { uio->uio_iov[ind].iov_len -= adj; break; } adj -= uio->uio_iov[ind].iov_len; uio->uio_iov[ind].iov_len = 0; ind--; uio->uio_iovcnt--; } } break; case CRYPTO_BUF_VMPAGE: adj = crp->crp_payload_length - result; crp->crp_buf.cb_vm_page_len -= adj; break; default: break; } } free(out, M_CRYPTO_DATA); return 0; } static int swcr_setup_cipher(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_encdec *swe; struct enc_xform *txf; int error; swe = &ses->swcr_encdec; txf = crypto_cipher(csp); MPASS(txf->ivsize == csp->csp_ivlen); if (txf->ctxsize != 0) { swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swe->sw_kschedule == NULL) return (ENOMEM); } if (csp->csp_cipher_key != NULL) { error = txf->setkey(swe->sw_kschedule, csp->csp_cipher_key, csp->csp_cipher_klen); if (error) return (error); } swe->sw_exf = txf; return (0); } static int swcr_setup_auth(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; struct auth_hash *axf; swa = &ses->swcr_auth; axf = crypto_auth_hash(csp); swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: swa->sw_octx = malloc(axf->ctxsize, 
M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_octx == NULL) return (ENOBUFS); if (csp->csp_auth_key != NULL) { swcr_authprepare(axf, swa, csp->csp_auth_key, csp->csp_auth_klen); } if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA2_384: case CRYPTO_SHA2_512: axf->Init(swa->sw_ictx); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_AES_NIST_GMAC: axf->Init(swa->sw_ictx); axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_gmac; break; case CRYPTO_POLY1305: case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: /* * Blake2b and Blake2s support an optional key but do * not require one. */ if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); axf->Init(swa->sw_ictx); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_AES_CCM_CBC_MAC: axf->Init(swa->sw_ictx); axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_ccm_cbc_mac; break; } return (0); } static int swcr_setup_gcm(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; struct auth_hash *axf; if (csp->csp_ivlen != AES_GCM_IV_LEN) return (EINVAL); /* First, setup the auth side. */ swa = &ses->swcr_auth; switch (csp->csp_cipher_klen * 8) { case 128: axf = &auth_hash_nist_gmac_aes_128; break; case 192: axf = &auth_hash_nist_gmac_aes_192; break; case 256: axf = &auth_hash_nist_gmac_aes_256; break; default: return (EINVAL); } swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); axf->Init(swa->sw_ictx); if (csp->csp_cipher_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_cipher_key, csp->csp_cipher_klen); /* Second, setup the cipher side. */ return (swcr_setup_cipher(ses, csp)); } static int swcr_setup_ccm(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; struct auth_hash *axf; if (csp->csp_ivlen != AES_CCM_IV_LEN) return (EINVAL); /* First, setup the auth side. */ swa = &ses->swcr_auth; switch (csp->csp_cipher_klen * 8) { case 128: axf = &auth_hash_ccm_cbc_mac_128; break; case 192: axf = &auth_hash_ccm_cbc_mac_192; break; case 256: axf = &auth_hash_ccm_cbc_mac_256; break; default: return (EINVAL); } swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); axf->Init(swa->sw_ictx); if (csp->csp_cipher_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_cipher_key, csp->csp_cipher_klen); /* Second, setup the cipher side. 
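	 * Note that a single key (csp_cipher_key) drives both halves of
	 * the AEAD session: it was loaded into the CBC-MAC context via
	 * axf->Setkey() above, and swcr_setup_cipher() loads it into the
	 * AES counter-mode key schedule here.  Resulting session wiring
	 * (sketch, using the fields referenced elsewhere in this file):
	 *
	 *	ses->swcr_auth.sw_ictx		keyed CBC-MAC template
	 *	ses->swcr_encdec.sw_kschedule	AES counter-mode key schedule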
*/ return (swcr_setup_cipher(ses, csp)); } static bool swcr_auth_supported(const struct crypto_session_params *csp) { struct auth_hash *axf; axf = crypto_auth_hash(csp); if (axf == NULL) return (false); switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: break; case CRYPTO_AES_NIST_GMAC: switch (csp->csp_auth_klen * 8) { case 128: case 192: case 256: break; default: return (false); } if (csp->csp_auth_key == NULL) return (false); if (csp->csp_ivlen != AES_GCM_IV_LEN) return (false); break; case CRYPTO_POLY1305: if (csp->csp_auth_klen != POLY1305_KEY_LEN) return (false); break; case CRYPTO_AES_CCM_CBC_MAC: switch (csp->csp_auth_klen * 8) { case 128: case 192: case 256: break; default: return (false); } if (csp->csp_auth_key == NULL) return (false); if (csp->csp_ivlen != AES_CCM_IV_LEN) return (false); break; } return (true); } static bool swcr_cipher_supported(const struct crypto_session_params *csp) { struct enc_xform *txf; txf = crypto_cipher(csp); if (txf == NULL) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC && txf->ivsize != csp->csp_ivlen) return (false); return (true); } #define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN) static int swcr_probesession(device_t dev, const struct crypto_session_params *csp) { if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: switch (csp->csp_cipher_alg) { case CRYPTO_DEFLATE_COMP: break; default: return (EINVAL); } break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: return (EINVAL); default: if (!swcr_cipher_supported(csp)) return (EINVAL); break; } break; case CSP_MODE_DIGEST: if (!swcr_auth_supported(csp)) return (EINVAL); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: break; default: return (EINVAL); } break; case CSP_MODE_ETA: /* AEAD algorithms cannot be used for EtA. */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: return (EINVAL); } switch (csp->csp_auth_alg) { case CRYPTO_AES_NIST_GMAC: case CRYPTO_AES_CCM_CBC_MAC: return (EINVAL); } if (!swcr_cipher_supported(csp) || !swcr_auth_supported(csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_SOFTWARE); } /* * Generate a new software session. 
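 * The session parameters select a swcr_process handler once at session
 * creation, so the per-request path is a single indirect call:
 *
 *	CSP_MODE_COMPRESS	swcr_compdec
 *	CSP_MODE_CIPHER		swcr_null or swcr_encdec
 *	CSP_MODE_DIGEST		swcr_authcompute, swcr_gmac, or
 *				swcr_ccm_cbc_mac
 *	CSP_MODE_AEAD		swcr_gcm or swcr_ccm
 *	CSP_MODE_ETA		swcr_eta (swcr_authcompute when the
 *				cipher is CRYPTO_NULL_CBC)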
*/ static int swcr_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct swcr_session *ses; struct swcr_encdec *swe; struct swcr_auth *swa; struct comp_algo *cxf; int error; ses = crypto_get_driver_session(cses); mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF); error = 0; swe = &ses->swcr_encdec; swa = &ses->swcr_auth; switch (csp->csp_mode) { case CSP_MODE_COMPRESS: switch (csp->csp_cipher_alg) { case CRYPTO_DEFLATE_COMP: cxf = &comp_algo_deflate; break; #ifdef INVARIANTS default: panic("bad compression algo"); #endif } ses->swcr_compdec.sw_cxf = cxf; ses->swcr_process = swcr_compdec; break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_NULL_CBC: ses->swcr_process = swcr_null; break; #ifdef INVARIANTS case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: panic("bad cipher algo"); #endif default: error = swcr_setup_cipher(ses, csp); if (error == 0) ses->swcr_process = swcr_encdec; } break; case CSP_MODE_DIGEST: error = swcr_setup_auth(ses, csp); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: error = swcr_setup_gcm(ses, csp); if (error == 0) ses->swcr_process = swcr_gcm; break; case CRYPTO_AES_CCM_16: error = swcr_setup_ccm(ses, csp); if (error == 0) ses->swcr_process = swcr_ccm; break; #ifdef INVARIANTS default: panic("bad aead algo"); #endif } break; case CSP_MODE_ETA: #ifdef INVARIANTS switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: panic("bad eta cipher algo"); } switch (csp->csp_auth_alg) { case CRYPTO_AES_NIST_GMAC: case CRYPTO_AES_CCM_CBC_MAC: panic("bad eta auth algo"); } #endif error = swcr_setup_auth(ses, csp); if (error) break; if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) { /* Effectively degrade to digest mode. */ ses->swcr_process = swcr_authcompute; break; } error = swcr_setup_cipher(ses, csp); if (error == 0) ses->swcr_process = swcr_eta; break; default: error = EINVAL; } if (error) swcr_freesession(dev, cses); return (error); } static void swcr_freesession(device_t dev, crypto_session_t cses) { struct swcr_session *ses; ses = crypto_get_driver_session(cses); mtx_destroy(&ses->swcr_lock); zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA); zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA); zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA); } /* * Process a software request. 
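 * Requests run synchronously: the handler cached in ses->swcr_process
 * executes under the per-session swcr_lock, the result is reported
 * through crp->crp_etype, and crypto_done() then completes the request
 * inline.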
*/ static int swcr_process(device_t dev, struct cryptop *crp, int hint) { struct swcr_session *ses; ses = crypto_get_driver_session(crp->crp_session); mtx_lock(&ses->swcr_lock); crp->crp_etype = ses->swcr_process(ses, crp); mtx_unlock(&ses->swcr_lock); crypto_done(crp); return (0); } static void swcr_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "cryptosoft", -1) == NULL && BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0) panic("cryptosoft: could not attach"); } static int swcr_probe(device_t dev) { device_set_desc(dev, "software crypto"); return (BUS_PROBE_NOWILDCARD); } static int swcr_attach(device_t dev) { swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session), CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); if (swcr_id < 0) { device_printf(dev, "cannot initialize!"); return (ENXIO); } return (0); } static int swcr_detach(device_t dev) { crypto_unregister_all(swcr_id); return 0; } static device_method_t swcr_methods[] = { DEVMETHOD(device_identify, swcr_identify), DEVMETHOD(device_probe, swcr_probe), DEVMETHOD(device_attach, swcr_attach), DEVMETHOD(device_detach, swcr_detach), DEVMETHOD(cryptodev_probesession, swcr_probesession), DEVMETHOD(cryptodev_newsession, swcr_newsession), DEVMETHOD(cryptodev_freesession,swcr_freesession), DEVMETHOD(cryptodev_process, swcr_process), {0, 0}, }; static driver_t swcr_driver = { "cryptosoft", swcr_methods, 0, /* NB: no softc */ }; static devclass_t swcr_devclass; /* * NB: We explicitly reference the crypto module so we * get the necessary ordering when built as a loadable * module. This is required because we bundle the crypto * module code together with the cryptosoft driver (otherwise * normal module dependencies would handle things). */ extern int crypto_modevent(struct module *, int, void *); /* XXX where to attach */ DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0); MODULE_VERSION(cryptosoft, 1); MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1); Index: head/sys/opencrypto/deflate.h =================================================================== --- head/sys/opencrypto/deflate.h (revision 367308) +++ head/sys/opencrypto/deflate.h (revision 367309) @@ -1,56 +1,56 @@ /* $FreeBSD$ */ /* $OpenBSD: deflate.h,v 1.3 2002/03/14 01:26:51 millert Exp $ */ /*- * Copyright (c) 2001 Jean-Jacques Bernard-Gundol (jj@wabbitt.org) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Definition for the wrapper around the deflate compression * algorithm used in /sys/crypto */ #ifndef _CRYPTO_DEFLATE_H_ #define _CRYPTO_DEFLATE_H_ #define Z_METHOD 8 #define Z_MEMLEVEL 8 #define MINCOMP 2 /* won't be used, but must be defined */ #define ZBUF 10 -u_int32_t deflate_global(u_int8_t *, u_int32_t, int, u_int8_t **); +uint32_t deflate_global(uint8_t *, uint32_t, int, uint8_t **); /* * We are going to use a combined allocation to hold the metadata * from the struct immediately followed by the real application data. */ struct deflate_buf { struct deflate_buf *next; uint32_t size; uint8_t data[]; }; #endif /* _CRYPTO_DEFLATE_H_ */ Index: head/sys/opencrypto/rmd160.c =================================================================== --- head/sys/opencrypto/rmd160.c (revision 367308) +++ head/sys/opencrypto/rmd160.c (revision 367309) @@ -1,367 +1,367 @@ /* $OpenBSD: rmd160.c,v 1.3 2001/09/26 21:40:13 markus Exp $ */ /*- * Copyright (c) 2001 Markus Friedl. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * Preneel, Bosselaers, Dobbertin, "The Cryptographic Hash Function RIPEMD-160", * RSA Laboratories, CryptoBytes, Volume 3, Number 2, Autumn 1997, * ftp://ftp.rsasecurity.com/pub/cryptobytes/crypto3n2.pdf */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #define PUT_64BIT_LE(cp, value) do { \ (cp)[7] = (value) >> 56; \ (cp)[6] = (value) >> 48; \ (cp)[5] = (value) >> 40; \ (cp)[4] = (value) >> 32; \ (cp)[3] = (value) >> 24; \ (cp)[2] = (value) >> 16; \ (cp)[1] = (value) >> 8; \ (cp)[0] = (value); } while (0) #define PUT_32BIT_LE(cp, value) do { \ (cp)[3] = (value) >> 24; \ (cp)[2] = (value) >> 16; \ (cp)[1] = (value) >> 8; \ (cp)[0] = (value); } while (0) #define H0 0x67452301U #define H1 0xEFCDAB89U #define H2 0x98BADCFEU #define H3 0x10325476U #define H4 0xC3D2E1F0U #define K0 0x00000000U #define K1 0x5A827999U #define K2 0x6ED9EBA1U #define K3 0x8F1BBCDCU #define K4 0xA953FD4EU #define KK0 0x50A28BE6U #define KK1 0x5C4DD124U #define KK2 0x6D703EF3U #define KK3 0x7A6D76E9U #define KK4 0x00000000U /* rotate x left n bits. */ #define ROL(n, x) (((x) << (n)) | ((x) >> (32-(n)))) #define F0(x, y, z) ((x) ^ (y) ^ (z)) #define F1(x, y, z) (((x) & (y)) | ((~x) & (z))) #define F2(x, y, z) (((x) | (~y)) ^ (z)) #define F3(x, y, z) (((x) & (z)) | ((y) & (~z))) #define F4(x, y, z) ((x) ^ ((y) | (~z))) #define R(a, b, c, d, e, Fj, Kj, sj, rj) \ do { \ a = ROL(sj, a + Fj(b,c,d) + X(rj) + Kj) + e; \ c = ROL(10, c); \ } while(0) #define X(i) x[i] static u_char PADDING[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; void RMD160Init(RMD160_CTX *ctx) { ctx->count = 0; ctx->state[0] = H0; ctx->state[1] = H1; ctx->state[2] = H2; ctx->state[3] = H3; ctx->state[4] = H4; } void -RMD160Update(RMD160_CTX *ctx, const u_char *input, u_int32_t len) +RMD160Update(RMD160_CTX *ctx, const u_char *input, uint32_t len) { - u_int32_t have, off, need; + uint32_t have, off, need; have = (ctx->count/8) % 64; need = 64 - have; ctx->count += 8 * len; off = 0; if (len >= need) { if (have) { memcpy(ctx->buffer + have, input, need); RMD160Transform(ctx->state, ctx->buffer); off = need; have = 0; } /* now the buffer is empty */ while (off + 64 <= len) { RMD160Transform(ctx->state, input+off); off += 64; } } if (off < len) memcpy(ctx->buffer + have, input+off, len-off); } void RMD160Final(u_char digest[20], RMD160_CTX *ctx) { int i; u_char size[8]; - u_int32_t padlen; + uint32_t padlen; PUT_64BIT_LE(size, ctx->count); /* * pad to 64 byte blocks, at least one byte from PADDING plus 8 bytes * for the size */ padlen = 64 - ((ctx->count/8) % 64); if (padlen < 1 + 8) padlen += 64; RMD160Update(ctx, PADDING, padlen - 8); /* padlen - 8 <= 64 */ RMD160Update(ctx, size, 8); if (digest != NULL) for (i = 0; i < 5; i++) PUT_32BIT_LE(digest + i*4, ctx->state[i]); memset(ctx, 0, sizeof (*ctx)); } void -RMD160Transform(u_int32_t state[5], const u_char block[64]) +RMD160Transform(uint32_t state[5], const u_char block[64]) { - u_int32_t a, b, c, d, e, aa, bb, cc, dd, ee, t, x[16]; + uint32_t a, b, c, d, e, aa, bb, cc, dd, ee, t, x[16]; #if BYTE_ORDER == LITTLE_ENDIAN memcpy(x, block, 64); #else int i; for (i = 0; i < 16; i++) - x[i] = bswap32(*(const u_int32_t*)(block+i*4)); + x[i] = bswap32(*(const uint32_t*)(block+i*4)); #endif a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4]; /* Round 1 */ R(a, b, c, d, e, F0, K0, 11, 0); R(e, a, b, c, d, F0, K0, 
14, 1); R(d, e, a, b, c, F0, K0, 15, 2); R(c, d, e, a, b, F0, K0, 12, 3); R(b, c, d, e, a, F0, K0, 5, 4); R(a, b, c, d, e, F0, K0, 8, 5); R(e, a, b, c, d, F0, K0, 7, 6); R(d, e, a, b, c, F0, K0, 9, 7); R(c, d, e, a, b, F0, K0, 11, 8); R(b, c, d, e, a, F0, K0, 13, 9); R(a, b, c, d, e, F0, K0, 14, 10); R(e, a, b, c, d, F0, K0, 15, 11); R(d, e, a, b, c, F0, K0, 6, 12); R(c, d, e, a, b, F0, K0, 7, 13); R(b, c, d, e, a, F0, K0, 9, 14); R(a, b, c, d, e, F0, K0, 8, 15); /* #15 */ /* Round 2 */ R(e, a, b, c, d, F1, K1, 7, 7); R(d, e, a, b, c, F1, K1, 6, 4); R(c, d, e, a, b, F1, K1, 8, 13); R(b, c, d, e, a, F1, K1, 13, 1); R(a, b, c, d, e, F1, K1, 11, 10); R(e, a, b, c, d, F1, K1, 9, 6); R(d, e, a, b, c, F1, K1, 7, 15); R(c, d, e, a, b, F1, K1, 15, 3); R(b, c, d, e, a, F1, K1, 7, 12); R(a, b, c, d, e, F1, K1, 12, 0); R(e, a, b, c, d, F1, K1, 15, 9); R(d, e, a, b, c, F1, K1, 9, 5); R(c, d, e, a, b, F1, K1, 11, 2); R(b, c, d, e, a, F1, K1, 7, 14); R(a, b, c, d, e, F1, K1, 13, 11); R(e, a, b, c, d, F1, K1, 12, 8); /* #31 */ /* Round 3 */ R(d, e, a, b, c, F2, K2, 11, 3); R(c, d, e, a, b, F2, K2, 13, 10); R(b, c, d, e, a, F2, K2, 6, 14); R(a, b, c, d, e, F2, K2, 7, 4); R(e, a, b, c, d, F2, K2, 14, 9); R(d, e, a, b, c, F2, K2, 9, 15); R(c, d, e, a, b, F2, K2, 13, 8); R(b, c, d, e, a, F2, K2, 15, 1); R(a, b, c, d, e, F2, K2, 14, 2); R(e, a, b, c, d, F2, K2, 8, 7); R(d, e, a, b, c, F2, K2, 13, 0); R(c, d, e, a, b, F2, K2, 6, 6); R(b, c, d, e, a, F2, K2, 5, 13); R(a, b, c, d, e, F2, K2, 12, 11); R(e, a, b, c, d, F2, K2, 7, 5); R(d, e, a, b, c, F2, K2, 5, 12); /* #47 */ /* Round 4 */ R(c, d, e, a, b, F3, K3, 11, 1); R(b, c, d, e, a, F3, K3, 12, 9); R(a, b, c, d, e, F3, K3, 14, 11); R(e, a, b, c, d, F3, K3, 15, 10); R(d, e, a, b, c, F3, K3, 14, 0); R(c, d, e, a, b, F3, K3, 15, 8); R(b, c, d, e, a, F3, K3, 9, 12); R(a, b, c, d, e, F3, K3, 8, 4); R(e, a, b, c, d, F3, K3, 9, 13); R(d, e, a, b, c, F3, K3, 14, 3); R(c, d, e, a, b, F3, K3, 5, 7); R(b, c, d, e, a, F3, K3, 6, 15); R(a, b, c, d, e, F3, K3, 8, 14); R(e, a, b, c, d, F3, K3, 6, 5); R(d, e, a, b, c, F3, K3, 5, 6); R(c, d, e, a, b, F3, K3, 12, 2); /* #63 */ /* Round 5 */ R(b, c, d, e, a, F4, K4, 9, 4); R(a, b, c, d, e, F4, K4, 15, 0); R(e, a, b, c, d, F4, K4, 5, 5); R(d, e, a, b, c, F4, K4, 11, 9); R(c, d, e, a, b, F4, K4, 6, 7); R(b, c, d, e, a, F4, K4, 8, 12); R(a, b, c, d, e, F4, K4, 13, 2); R(e, a, b, c, d, F4, K4, 12, 10); R(d, e, a, b, c, F4, K4, 5, 14); R(c, d, e, a, b, F4, K4, 12, 1); R(b, c, d, e, a, F4, K4, 13, 3); R(a, b, c, d, e, F4, K4, 14, 8); R(e, a, b, c, d, F4, K4, 11, 11); R(d, e, a, b, c, F4, K4, 8, 6); R(c, d, e, a, b, F4, K4, 5, 15); R(b, c, d, e, a, F4, K4, 6, 13); /* #79 */ aa = a ; bb = b; cc = c; dd = d; ee = e; a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4]; /* Parallel round 1 */ R(a, b, c, d, e, F4, KK0, 8, 5); R(e, a, b, c, d, F4, KK0, 9, 14); R(d, e, a, b, c, F4, KK0, 9, 7); R(c, d, e, a, b, F4, KK0, 11, 0); R(b, c, d, e, a, F4, KK0, 13, 9); R(a, b, c, d, e, F4, KK0, 15, 2); R(e, a, b, c, d, F4, KK0, 15, 11); R(d, e, a, b, c, F4, KK0, 5, 4); R(c, d, e, a, b, F4, KK0, 7, 13); R(b, c, d, e, a, F4, KK0, 7, 6); R(a, b, c, d, e, F4, KK0, 8, 15); R(e, a, b, c, d, F4, KK0, 11, 8); R(d, e, a, b, c, F4, KK0, 14, 1); R(c, d, e, a, b, F4, KK0, 14, 10); R(b, c, d, e, a, F4, KK0, 12, 3); R(a, b, c, d, e, F4, KK0, 6, 12); /* #15 */ /* Parallel round 2 */ R(e, a, b, c, d, F3, KK1, 9, 6); R(d, e, a, b, c, F3, KK1, 13, 11); R(c, d, e, a, b, F3, KK1, 15, 3); R(b, c, d, e, a, F3, KK1, 7, 7); R(a, b, c, d, e, F3, KK1, 12, 
0); R(e, a, b, c, d, F3, KK1, 8, 13); R(d, e, a, b, c, F3, KK1, 9, 5); R(c, d, e, a, b, F3, KK1, 11, 10); R(b, c, d, e, a, F3, KK1, 7, 14); R(a, b, c, d, e, F3, KK1, 7, 15); R(e, a, b, c, d, F3, KK1, 12, 8); R(d, e, a, b, c, F3, KK1, 7, 12); R(c, d, e, a, b, F3, KK1, 6, 4); R(b, c, d, e, a, F3, KK1, 15, 9); R(a, b, c, d, e, F3, KK1, 13, 1); R(e, a, b, c, d, F3, KK1, 11, 2); /* #31 */ /* Parallel round 3 */ R(d, e, a, b, c, F2, KK2, 9, 15); R(c, d, e, a, b, F2, KK2, 7, 5); R(b, c, d, e, a, F2, KK2, 15, 1); R(a, b, c, d, e, F2, KK2, 11, 3); R(e, a, b, c, d, F2, KK2, 8, 7); R(d, e, a, b, c, F2, KK2, 6, 14); R(c, d, e, a, b, F2, KK2, 6, 6); R(b, c, d, e, a, F2, KK2, 14, 9); R(a, b, c, d, e, F2, KK2, 12, 11); R(e, a, b, c, d, F2, KK2, 13, 8); R(d, e, a, b, c, F2, KK2, 5, 12); R(c, d, e, a, b, F2, KK2, 14, 2); R(b, c, d, e, a, F2, KK2, 13, 10); R(a, b, c, d, e, F2, KK2, 13, 0); R(e, a, b, c, d, F2, KK2, 7, 4); R(d, e, a, b, c, F2, KK2, 5, 13); /* #47 */ /* Parallel round 4 */ R(c, d, e, a, b, F1, KK3, 15, 8); R(b, c, d, e, a, F1, KK3, 5, 6); R(a, b, c, d, e, F1, KK3, 8, 4); R(e, a, b, c, d, F1, KK3, 11, 1); R(d, e, a, b, c, F1, KK3, 14, 3); R(c, d, e, a, b, F1, KK3, 14, 11); R(b, c, d, e, a, F1, KK3, 6, 15); R(a, b, c, d, e, F1, KK3, 14, 0); R(e, a, b, c, d, F1, KK3, 6, 5); R(d, e, a, b, c, F1, KK3, 9, 12); R(c, d, e, a, b, F1, KK3, 12, 2); R(b, c, d, e, a, F1, KK3, 9, 13); R(a, b, c, d, e, F1, KK3, 12, 9); R(e, a, b, c, d, F1, KK3, 5, 7); R(d, e, a, b, c, F1, KK3, 15, 10); R(c, d, e, a, b, F1, KK3, 8, 14); /* #63 */ /* Parallel round 5 */ R(b, c, d, e, a, F0, KK4, 8, 12); R(a, b, c, d, e, F0, KK4, 5, 15); R(e, a, b, c, d, F0, KK4, 12, 10); R(d, e, a, b, c, F0, KK4, 9, 4); R(c, d, e, a, b, F0, KK4, 12, 1); R(b, c, d, e, a, F0, KK4, 5, 5); R(a, b, c, d, e, F0, KK4, 14, 8); R(e, a, b, c, d, F0, KK4, 6, 7); R(d, e, a, b, c, F0, KK4, 8, 6); R(c, d, e, a, b, F0, KK4, 13, 2); R(b, c, d, e, a, F0, KK4, 6, 13); R(a, b, c, d, e, F0, KK4, 5, 14); R(e, a, b, c, d, F0, KK4, 15, 0); R(d, e, a, b, c, F0, KK4, 13, 3); R(c, d, e, a, b, F0, KK4, 11, 9); R(b, c, d, e, a, F0, KK4, 11, 11); /* #79 */ t = state[1] + cc + d; state[1] = state[2] + dd + e; state[2] = state[3] + ee + a; state[3] = state[4] + aa + b; state[4] = state[0] + bb + c; state[0] = t; } Index: head/sys/opencrypto/rmd160.h =================================================================== --- head/sys/opencrypto/rmd160.h (revision 367308) +++ head/sys/opencrypto/rmd160.h (revision 367309) @@ -1,41 +1,41 @@ /* $FreeBSD$ */ /* $OpenBSD: rmd160.h,v 1.3 2002/03/14 01:26:51 millert Exp $ */ /*- * Copyright (c) 2001 Markus Friedl. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RMD160_H #define _RMD160_H /* RMD160 context. */ typedef struct RMD160Context { - u_int32_t state[5]; /* state */ - u_int64_t count; /* number of bits, modulo 2^64 */ + uint32_t state[5]; /* state */ + uint64_t count; /* number of bits, modulo 2^64 */ u_char buffer[64]; /* input buffer */ } RMD160_CTX; void RMD160Init(RMD160_CTX *); -void RMD160Transform(u_int32_t [5], const u_char [64]); -void RMD160Update(RMD160_CTX *, const u_char *, u_int32_t); +void RMD160Transform(uint32_t [5], const u_char [64]); +void RMD160Update(RMD160_CTX *, const u_char *, uint32_t); void RMD160Final(u_char [20], RMD160_CTX *); #endif /* _RMD160_H */ Index: head/sys/opencrypto/xform_auth.h =================================================================== --- head/sys/opencrypto/xform_auth.h (revision 367308) +++ head/sys/opencrypto/xform_auth.h (revision 367309) @@ -1,99 +1,99 @@ /* $FreeBSD$ */ /* $OpenBSD: xform.h,v 1.8 2001/08/28 12:20:43 ben Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000 Angelos D. Keromytis * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #ifndef _CRYPTO_XFORM_AUTH_H_ #define _CRYPTO_XFORM_AUTH_H_ #include #include #include #include #include #include #include #include #include #include #include /* XXX use a define common with other hash stuff ! 
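 * (The 64-byte bound below matches the largest digest produced by the
 * transforms declared here, SHA-512.)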
*/ #define AH_ALEN_MAX 64 /* max authenticator hash length */ /* Declarations */ struct auth_hash { int type; char *name; - u_int16_t keysize; - u_int16_t hashsize; - u_int16_t ctxsize; - u_int16_t blocksize; + uint16_t keysize; + uint16_t hashsize; + uint16_t ctxsize; + uint16_t blocksize; void (*Init) (void *); void (*Setkey) (void *, const uint8_t *, u_int); void (*Reinit) (void *, const uint8_t *, u_int); int (*Update) (void *, const void *, u_int); - void (*Final) (u_int8_t *, void *); + void (*Final) (uint8_t *, void *); }; extern struct auth_hash auth_hash_null; extern struct auth_hash auth_hash_hmac_sha1; extern struct auth_hash auth_hash_hmac_ripemd_160; extern struct auth_hash auth_hash_hmac_sha2_224; extern struct auth_hash auth_hash_hmac_sha2_256; extern struct auth_hash auth_hash_hmac_sha2_384; extern struct auth_hash auth_hash_hmac_sha2_512; extern struct auth_hash auth_hash_sha1; extern struct auth_hash auth_hash_sha2_224; extern struct auth_hash auth_hash_sha2_256; extern struct auth_hash auth_hash_sha2_384; extern struct auth_hash auth_hash_sha2_512; extern struct auth_hash auth_hash_nist_gmac_aes_128; extern struct auth_hash auth_hash_nist_gmac_aes_192; extern struct auth_hash auth_hash_nist_gmac_aes_256; extern struct auth_hash auth_hash_blake2b; extern struct auth_hash auth_hash_blake2s; extern struct auth_hash auth_hash_poly1305; extern struct auth_hash auth_hash_ccm_cbc_mac_128; extern struct auth_hash auth_hash_ccm_cbc_mac_192; extern struct auth_hash auth_hash_ccm_cbc_mac_256; union authctx { SHA1_CTX sha1ctx; RMD160_CTX rmd160ctx; SHA224_CTX sha224ctx; SHA256_CTX sha256ctx; SHA384_CTX sha384ctx; SHA512_CTX sha512ctx; struct aes_gmac_ctx aes_gmac_ctx; struct aes_cbc_mac_ctx aes_cbc_mac_ctx; }; #endif /* _CRYPTO_XFORM_AUTH_H_ */ Index: head/sys/opencrypto/xform_comp.h =================================================================== --- head/sys/opencrypto/xform_comp.h (revision 367308) +++ head/sys/opencrypto/xform_comp.h (revision 367309) @@ -1,51 +1,51 @@ /* $FreeBSD$ */ /* $OpenBSD: xform.h,v 1.8 2001/08/28 12:20:43 ben Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000 Angelos D. Keromytis * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. 
*/ #ifndef _CRYPTO_XFORM_COMP_H_ #define _CRYPTO_XFORM_COMP_H_ #include #include #include #include /* Declarations */ struct comp_algo { int type; char *name; size_t minlen; - u_int32_t (*compress) (u_int8_t *, u_int32_t, u_int8_t **); - u_int32_t (*decompress) (u_int8_t *, u_int32_t, u_int8_t **); + uint32_t (*compress) (uint8_t *, uint32_t, uint8_t **); + uint32_t (*decompress) (uint8_t *, uint32_t, uint8_t **); }; extern struct comp_algo comp_algo_deflate; #endif /* _CRYPTO_XFORM_COMP_H_ */ Index: head/sys/opencrypto/xform_deflate.c =================================================================== --- head/sys/opencrypto/xform_deflate.c (revision 367308) +++ head/sys/opencrypto/xform_deflate.c (revision 367309) @@ -1,86 +1,86 @@ /* $OpenBSD: xform.c,v 1.16 2001/08/28 12:20:43 ben Exp $ */ /*- * The authors of this code are John Ioannidis (ji@tla.org), * Angelos D. Keromytis (kermit@csd.uch.gr), * Niels Provos (provos@physnet.uni-hamburg.de) and * Damien Miller (djm@mindrot.org). * * This code was written by John Ioannidis for BSD/OS in Athens, Greece, * in November 1995. * * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996, * by Angelos D. Keromytis. * * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis * and Niels Provos. * * Additional features in 1999 by Angelos D. Keromytis. * * AES XTS implementation in 2008 by Damien Miller * * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis, * Angelos D. Keromytis and Niels Provos. * * Copyright (C) 2001, Angelos D. Keromytis. * * Copyright (C) 2008, Damien Miller * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all copies of any software which is or includes a copy or * modification of this software. * You may use this code under the GNU public license if you so wish. Please * contribute changes back to the authors under this freer than GPL license * so that we may further the use of strong encryption without limitations to * all. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include -static u_int32_t deflate_compress(u_int8_t *, u_int32_t, u_int8_t **); -static u_int32_t deflate_decompress(u_int8_t *, u_int32_t, u_int8_t **); +static uint32_t deflate_compress(uint8_t *, uint32_t, uint8_t **); +static uint32_t deflate_decompress(uint8_t *, uint32_t, uint8_t **); /* Compression instance */ struct comp_algo comp_algo_deflate = { CRYPTO_DEFLATE_COMP, "Deflate", 90, deflate_compress, deflate_decompress }; /* * And compression */ -static u_int32_t +static uint32_t deflate_compress(data, size, out) - u_int8_t *data; - u_int32_t size; - u_int8_t **out; + uint8_t *data; + uint32_t size; + uint8_t **out; { return deflate_global(data, size, 0, out); } -static u_int32_t +static uint32_t deflate_decompress(data, size, out) - u_int8_t *data; - u_int32_t size; - u_int8_t **out; + uint8_t *data; + uint32_t size; + uint8_t **out; { return deflate_global(data, size, 1, out); } Index: head/sys/opencrypto/xform_enc.h =================================================================== --- head/sys/opencrypto/xform_enc.h (revision 367308) +++ head/sys/opencrypto/xform_enc.h (revision 367309) @@ -1,99 +1,99 @@ /* $FreeBSD$ */ /* $OpenBSD: xform.h,v 1.8 2001/08/28 12:20:43 ben Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000 Angelos D. Keromytis * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #ifndef _CRYPTO_XFORM_ENC_H_ #define _CRYPTO_XFORM_ENC_H_ #include #include #include #include #include #ifdef _STANDALONE #include #endif #define AESICM_BLOCKSIZE AES_BLOCK_LEN #define AES_XTS_BLOCKSIZE 16 #define AES_XTS_IVSIZE 8 #define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */ /* Declarations */ struct enc_xform { int type; char *name; size_t ctxsize; - u_int16_t blocksize; /* Required input block size -- 1 for stream ciphers. */ + uint16_t blocksize; /* Required input block size -- 1 for stream ciphers. */ uint16_t native_blocksize; /* Used for stream ciphers. */ - u_int16_t ivsize; - u_int16_t minkey, maxkey; + uint16_t ivsize; + uint16_t minkey, maxkey; /* * Encrypt/decrypt a single block. For stream ciphers this * encrypts/decrypts a single "native" block. */ void (*encrypt) (void *, const uint8_t *, uint8_t *); void (*decrypt) (void *, const uint8_t *, uint8_t *); int (*setkey) (void *, const uint8_t *, int len); - void (*reinit) (void *, const u_int8_t *); + void (*reinit) (void *, const uint8_t *); /* * For stream ciphers, encrypt/decrypt the final partial block * of 'len' bytes. 
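	 * A typical implementation generates one final native block of
	 * keystream and XORs only the first 'len' bytes into the output.
	 * Illustrative sketch; generate_keystream() is a hypothetical
	 * helper, not part of this API:
	 *
	 *	uint8_t ks[64];			one native block
	 *	generate_keystream(ctx, ks);
	 *	for (i = 0; i < len; i++)
	 *		out[i] = in[i] ^ ks[i];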
*/ void (*encrypt_last) (void *, const uint8_t *, uint8_t *, size_t len); void (*decrypt_last) (void *, const uint8_t *, uint8_t *, size_t len); }; extern struct enc_xform enc_xform_null; extern struct enc_xform enc_xform_rijndael128; extern struct enc_xform enc_xform_aes_icm; extern struct enc_xform enc_xform_aes_nist_gcm; extern struct enc_xform enc_xform_aes_nist_gmac; extern struct enc_xform enc_xform_aes_xts; extern struct enc_xform enc_xform_camellia; extern struct enc_xform enc_xform_chacha20; extern struct enc_xform enc_xform_ccm; struct aes_icm_ctx { - u_int32_t ac_ek[4*(RIJNDAEL_MAXNR + 1)]; + uint32_t ac_ek[4*(RIJNDAEL_MAXNR + 1)]; /* ac_block is initialized to IV */ - u_int8_t ac_block[AESICM_BLOCKSIZE]; + uint8_t ac_block[AESICM_BLOCKSIZE]; int ac_nr; }; struct aes_xts_ctx { rijndael_ctx key1; rijndael_ctx key2; - u_int8_t tweak[AES_XTS_BLOCKSIZE]; + uint8_t tweak[AES_XTS_BLOCKSIZE]; }; #endif /* _CRYPTO_XFORM_ENC_H_ */ Index: head/sys/opencrypto/xform_null.c =================================================================== --- head/sys/opencrypto/xform_null.c (revision 367308) +++ head/sys/opencrypto/xform_null.c (revision 367309) @@ -1,132 +1,132 @@ /* $OpenBSD: xform.c,v 1.16 2001/08/28 12:20:43 ben Exp $ */ /*- * The authors of this code are John Ioannidis (ji@tla.org), * Angelos D. Keromytis (kermit@csd.uch.gr), * Niels Provos (provos@physnet.uni-hamburg.de) and * Damien Miller (djm@mindrot.org). * * This code was written by John Ioannidis for BSD/OS in Athens, Greece, * in November 1995. * * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996, * by Angelos D. Keromytis. * * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis * and Niels Provos. * * Additional features in 1999 by Angelos D. Keromytis. * * AES XTS implementation in 2008 by Damien Miller * * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis, * Angelos D. Keromytis and Niels Provos. * * Copyright (C) 2001, Angelos D. Keromytis. * * Copyright (C) 2008, Damien Miller * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all copies of any software which is or includes a copy or * modification of this software. * You may use this code under the GNU public license if you so wish. Please * contribute changes back to the authors under this freer than GPL license * so that we may further the use of strong encryption without limitations to * all. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include -static int null_setkey(void *, const u_int8_t *, int); +static int null_setkey(void *, const uint8_t *, int); static void null_crypt(void *, const uint8_t *, uint8_t *); static void null_init(void *); static void null_reinit(void *ctx, const uint8_t *buf, u_int len); static int null_update(void *, const void *, u_int); -static void null_final(u_int8_t *, void *); +static void null_final(uint8_t *, void *); /* Encryption instances */ struct enc_xform enc_xform_null = { .type = CRYPTO_NULL_CBC, .name = "NULL", /* NB: blocksize of 4 is to generate a properly aligned ESP header */ .blocksize = NULL_BLOCK_LEN, .ivsize = 0, .minkey = NULL_MIN_KEY, .maxkey = NULL_MAX_KEY, .encrypt = null_crypt, .decrypt = null_crypt, .setkey = null_setkey, }; /* Authentication instances */ struct auth_hash auth_hash_null = { .type = CRYPTO_NULL_HMAC, .name = "NULL-HMAC", .keysize = 0, .hashsize = NULL_HASH_LEN, .ctxsize = sizeof(int), /* NB: context isn't used */ .blocksize = NULL_HMAC_BLOCK_LEN, .Init = null_init, .Setkey = null_reinit, .Reinit = null_reinit, .Update = null_update, .Final = null_final, }; /* * Encryption wrapper routines. */ static void null_crypt(void *key, const uint8_t *in, uint8_t *out) { } static int null_setkey(void *sched, const uint8_t *key, int len) { return (0); } /* * And now for auth. */ static void null_init(void *ctx) { } static void null_reinit(void *ctx, const uint8_t *buf, u_int len) { } static int null_update(void *ctx, const void *buf, u_int len) { return 0; } static void -null_final(u_int8_t *buf, void *ctx) +null_final(uint8_t *buf, void *ctx) { - if (buf != (u_int8_t *) 0) + if (buf != (uint8_t *) 0) bzero(buf, 12); } Index: head/sys/opencrypto/xform_rijndael.c =================================================================== --- head/sys/opencrypto/xform_rijndael.c (revision 367308) +++ head/sys/opencrypto/xform_rijndael.c (revision 367309) @@ -1,98 +1,98 @@ /* $OpenBSD: xform.c,v 1.16 2001/08/28 12:20:43 ben Exp $ */ /*- * The authors of this code are John Ioannidis (ji@tla.org), * Angelos D. Keromytis (kermit@csd.uch.gr), * Niels Provos (provos@physnet.uni-hamburg.de) and * Damien Miller (djm@mindrot.org). * * This code was written by John Ioannidis for BSD/OS in Athens, Greece, * in November 1995. * * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996, * by Angelos D. Keromytis. * * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis * and Niels Provos. * * Additional features in 1999 by Angelos D. Keromytis. * * AES XTS implementation in 2008 by Damien Miller * * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis, * Angelos D. Keromytis and Niels Provos. * * Copyright (C) 2001, Angelos D. Keromytis. * * Copyright (C) 2008, Damien Miller * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all copies of any software which is or includes a copy or * modification of this software. * You may use this code under the GNU public license if you so wish. Please * contribute changes back to the authors under this freer than GPL license * so that we may further the use of strong encryption without limitations to * all. 
Index: head/sys/opencrypto/xform_rijndael.c
===================================================================
--- head/sys/opencrypto/xform_rijndael.c	(revision 367308)
+++ head/sys/opencrypto/xform_rijndael.c	(revision 367309)
@@ -1,98 +1,98 @@
 /*	$OpenBSD: xform.c,v 1.16 2001/08/28 12:20:43 ben Exp $	*/
 /*-
  * The authors of this code are John Ioannidis (ji@tla.org),
  * Angelos D. Keromytis (kermit@csd.uch.gr),
  * Niels Provos (provos@physnet.uni-hamburg.de) and
  * Damien Miller (djm@mindrot.org).
  *
  * This code was written by John Ioannidis for BSD/OS in Athens, Greece,
  * in November 1995.
  *
  * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
  * by Angelos D. Keromytis.
  *
  * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
  * and Niels Provos.
  *
  * Additional features in 1999 by Angelos D. Keromytis.
  *
  * AES XTS implementation in 2008 by Damien Miller
  *
  * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis,
  * Angelos D. Keromytis and Niels Provos.
  *
  * Copyright (C) 2001, Angelos D. Keromytis.
  *
  * Copyright (C) 2008, Damien Miller
  * Copyright (c) 2014 The FreeBSD Foundation
  * All rights reserved.
  *
  * Portions of this software were developed by John-Mark Gurney
  * under sponsorship of the FreeBSD Foundation and
  * Rubicon Communications, LLC (Netgate).
  *
  * Permission to use, copy, and modify this software with or without fee
  * is hereby granted, provided that this entire notice is included in
  * all copies of any software which is or includes a copy or
  * modification of this software.
  * You may use this code under the GNU public license if you so wish. Please
  * contribute changes back to the authors under this freer than GPL license
  * so that we may further the use of strong encryption without limitations to
  * all.
  *
  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
  * PURPOSE.
  */

 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");

 #include <crypto/rijndael/rijndael.h>
 #include <opencrypto/xform_enc.h>

-static	int rijndael128_setkey(void *, const u_int8_t *, int);
+static	int rijndael128_setkey(void *, const uint8_t *, int);
 static	void rijndael128_encrypt(void *, const uint8_t *, uint8_t *);
 static	void rijndael128_decrypt(void *, const uint8_t *, uint8_t *);

 /* Encryption instances */
 struct enc_xform enc_xform_rijndael128 = {
 	.type = CRYPTO_RIJNDAEL128_CBC,
 	.name = "Rijndael-128/AES",
 	.ctxsize = sizeof(rijndael_ctx),
 	.blocksize = RIJNDAEL128_BLOCK_LEN,
 	.ivsize = RIJNDAEL128_BLOCK_LEN,
 	.minkey = RIJNDAEL_MIN_KEY,
 	.maxkey = RIJNDAEL_MAX_KEY,
 	.encrypt = rijndael128_encrypt,
 	.decrypt = rijndael128_decrypt,
 	.setkey = rijndael128_setkey,
 };

 /*
  * Encryption wrapper routines.
  */
 static void
 rijndael128_encrypt(void *key, const uint8_t *in, uint8_t *out)
 {
 	rijndael_encrypt(key, in, out);
 }

 static void
 rijndael128_decrypt(void *key, const uint8_t *in, uint8_t *out)
 {
 	rijndael_decrypt(key, in, out);
 }

 static int
 rijndael128_setkey(void *sched, const uint8_t *key, int len)
 {
 	if (len != 16 && len != 24 && len != 32)
 		return (EINVAL);
 	rijndael_set_key(sched, key, len * 8);
 	return (0);
 }
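rijndael128_setkey() takes the key length in bytes and converts to bits for rijndael_set_key(), so exactly 16, 24, or 32 bytes select AES-128, AES-192, or AES-256. A small hypothetical demonstration of the wrappers above on a single block; the values are placeholders, not a published test vector:

/*
 * Example (editorial, not part of this diff): one AES block through the
 * descriptor above.
 */
#include <opencrypto/xform_enc.h>

static void
aes_one_block_demo(void)
{
	rijndael_ctx ctx;
	static const uint8_t key[32];	/* 32 bytes selects AES-256 */
	uint8_t pt[RIJNDAEL128_BLOCK_LEN] = { 0 };
	uint8_t ct[RIJNDAEL128_BLOCK_LEN];

	/* setkey rejects anything other than 16, 24 or 32 bytes. */
	if (enc_xform_rijndael128.setkey(&ctx, key, sizeof(key)) != 0)
		return;
	enc_xform_rijndael128.encrypt(&ctx, pt, ct);
	enc_xform_rijndael128.decrypt(&ctx, ct, pt);	/* round-trips */
}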
Index: head/sys/opencrypto/xform_sha1.c
===================================================================
--- head/sys/opencrypto/xform_sha1.c	(revision 367308)
+++ head/sys/opencrypto/xform_sha1.c	(revision 367309)
@@ -1,105 +1,105 @@
 /*	$OpenBSD: xform.c,v 1.16 2001/08/28 12:20:43 ben Exp $	*/
 /*-
  * The authors of this code are John Ioannidis (ji@tla.org),
  * Angelos D. Keromytis (kermit@csd.uch.gr),
  * Niels Provos (provos@physnet.uni-hamburg.de) and
  * Damien Miller (djm@mindrot.org).
  *
  * This code was written by John Ioannidis for BSD/OS in Athens, Greece,
  * in November 1995.
  *
  * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
  * by Angelos D. Keromytis.
  *
  * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
  * and Niels Provos.
  *
  * Additional features in 1999 by Angelos D. Keromytis.
  *
  * AES XTS implementation in 2008 by Damien Miller
  *
  * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis,
  * Angelos D. Keromytis and Niels Provos.
  *
  * Copyright (C) 2001, Angelos D. Keromytis.
  *
  * Copyright (C) 2008, Damien Miller
  * Copyright (c) 2014 The FreeBSD Foundation
  * All rights reserved.
  *
  * Portions of this software were developed by John-Mark Gurney
  * under sponsorship of the FreeBSD Foundation and
  * Rubicon Communications, LLC (Netgate).
  *
  * Permission to use, copy, and modify this software with or without fee
  * is hereby granted, provided that this entire notice is included in
  * all copies of any software which is or includes a copy or
  * modification of this software.
  * You may use this code under the GNU public license if you so wish. Please
  * contribute changes back to the authors under this freer than GPL license
  * so that we may further the use of strong encryption without limitations to
  * all.
  *
  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
  * PURPOSE.
  */

 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");

 #include <crypto/sha1.h>
 #include <opencrypto/xform_auth.h>

 static	void SHA1Init_int(void *);
 static	int SHA1Update_int(void *, const void *, u_int);
-static	void SHA1Final_int(u_int8_t *, void *);
+static	void SHA1Final_int(uint8_t *, void *);

 /* Plain hash */
 struct auth_hash auth_hash_sha1 = {
 	.type = CRYPTO_SHA1,
 	.name = "SHA1",
 	.hashsize = SHA1_HASH_LEN,
 	.ctxsize = sizeof(SHA1_CTX),
 	.blocksize = SHA1_BLOCK_LEN,
 	.Init = SHA1Init_int,
 	.Update = SHA1Update_int,
 	.Final = SHA1Final_int,
 };

 /* Authentication instances */
 struct auth_hash auth_hash_hmac_sha1 = {
 	.type = CRYPTO_SHA1_HMAC,
 	.name = "HMAC-SHA1",
 	.keysize = SHA1_BLOCK_LEN,
 	.hashsize = SHA1_HASH_LEN,
 	.ctxsize = sizeof(SHA1_CTX),
 	.blocksize = SHA1_BLOCK_LEN,
 	.Init = SHA1Init_int,
 	.Update = SHA1Update_int,
 	.Final = SHA1Final_int,
 };

 /*
  * And now for auth.
  */
 static void
 SHA1Init_int(void *ctx)
 {
 	SHA1Init(ctx);
 }

 static int
 SHA1Update_int(void *ctx, const void *buf, u_int len)
 {
 	SHA1Update(ctx, buf, len);
 	return 0;
 }

 static void
-SHA1Final_int(u_int8_t *blk, void *ctx)
+SHA1Final_int(uint8_t *blk, void *ctx)
 {
 	SHA1Final(blk, ctx);
 }
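auth_hash_hmac_sha1 sets keysize to SHA1_BLOCK_LEN because HMAC XORs a block-sized key with the inner and outer pads. The sketch below is the textbook construction that the framework layers on top of an auth_hash; hmac_once() is a hypothetical helper, and the HMAC_IPAD_VAL, HMAC_OPAD_VAL, and HMAC_MAX_BLOCK_LEN constants are assumed to come from xform_auth.h:

/*
 * Example (editorial, not part of this diff): HMAC over an auth_hash
 * whose keysize equals its blocksize, e.g. auth_hash_hmac_sha1.
 */
#include <sys/param.h>
#include <crypto/sha1.h>
#include <opencrypto/xform_auth.h>

static void
hmac_once(const struct auth_hash *ah,
    const uint8_t *key,		/* ah->blocksize bytes, pre-padded */
    const void *msg, u_int msglen,
    uint8_t *tag)		/* ah->hashsize bytes */
{
	SHA1_CTX ctx;		/* big enough when ah is the SHA1 instance */
	uint8_t pad[HMAC_MAX_BLOCK_LEN];
	u_int i;

	for (i = 0; i < ah->blocksize; i++)
		pad[i] = key[i] ^ HMAC_IPAD_VAL;	/* inner pad, 0x36 */
	ah->Init(&ctx);
	(void)ah->Update(&ctx, pad, ah->blocksize);
	(void)ah->Update(&ctx, msg, msglen);
	ah->Final(tag, &ctx);				/* inner digest */

	for (i = 0; i < ah->blocksize; i++)
		pad[i] = key[i] ^ HMAC_OPAD_VAL;	/* outer pad, 0x5c */
	ah->Init(&ctx);
	(void)ah->Update(&ctx, pad, ah->blocksize);
	(void)ah->Update(&ctx, tag, ah->hashsize);
	ah->Final(tag, &ctx);				/* final tag */
}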
Index: head/sys/opencrypto/xform_sha2.c
===================================================================
--- head/sys/opencrypto/xform_sha2.c	(revision 367308)
+++ head/sys/opencrypto/xform_sha2.c	(revision 367309)
@@ -1,190 +1,190 @@
 /*	$OpenBSD: xform.c,v 1.16 2001/08/28 12:20:43 ben Exp $	*/
 /*-
  * The authors of this code are John Ioannidis (ji@tla.org),
  * Angelos D. Keromytis (kermit@csd.uch.gr),
  * Niels Provos (provos@physnet.uni-hamburg.de) and
  * Damien Miller (djm@mindrot.org).
  *
  * This code was written by John Ioannidis for BSD/OS in Athens, Greece,
  * in November 1995.
  *
  * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
  * by Angelos D. Keromytis.
  *
  * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
  * and Niels Provos.
  *
  * Additional features in 1999 by Angelos D. Keromytis.
  *
  * AES XTS implementation in 2008 by Damien Miller
  *
  * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis,
  * Angelos D. Keromytis and Niels Provos.
  *
  * Copyright (C) 2001, Angelos D. Keromytis.
  *
  * Copyright (C) 2008, Damien Miller
  * Copyright (c) 2014 The FreeBSD Foundation
  * All rights reserved.
  *
  * Portions of this software were developed by John-Mark Gurney
  * under sponsorship of the FreeBSD Foundation and
  * Rubicon Communications, LLC (Netgate).
  *
  * Permission to use, copy, and modify this software with or without fee
  * is hereby granted, provided that this entire notice is included in
  * all copies of any software which is or includes a copy or
  * modification of this software.
  * You may use this code under the GNU public license if you so wish. Please
  * contribute changes back to the authors under this freer than GPL license
  * so that we may further the use of strong encryption without limitations to
  * all.
  *
  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
  * PURPOSE.
  */

 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");

 #include <crypto/sha2/sha224.h>
 #include <crypto/sha2/sha256.h>
 #include <crypto/sha2/sha384.h>
 #include <crypto/sha2/sha512.h>
 #include <opencrypto/xform_auth.h>

 static	int SHA224Update_int(void *, const void *, u_int);
 static	int SHA256Update_int(void *, const void *, u_int);
 static	int SHA384Update_int(void *, const void *, u_int);
 static	int SHA512Update_int(void *, const void *, u_int);

 /* Plain hashes */
 struct auth_hash auth_hash_sha2_224 = {
 	.type = CRYPTO_SHA2_224,
 	.name = "SHA2-224",
 	.hashsize = SHA2_224_HASH_LEN,
 	.ctxsize = sizeof(SHA224_CTX),
 	.blocksize = SHA2_224_BLOCK_LEN,
 	.Init = (void (*)(void *)) SHA224_Init,
 	.Update = SHA224Update_int,
-	.Final = (void (*)(u_int8_t *, void *)) SHA224_Final,
+	.Final = (void (*)(uint8_t *, void *)) SHA224_Final,
 };

 struct auth_hash auth_hash_sha2_256 = {
 	.type = CRYPTO_SHA2_256,
 	.name = "SHA2-256",
 	.keysize = SHA2_256_BLOCK_LEN,
 	.hashsize = SHA2_256_HASH_LEN,
 	.ctxsize = sizeof(SHA256_CTX),
 	.blocksize = SHA2_256_BLOCK_LEN,
 	.Init = (void (*)(void *)) SHA256_Init,
 	.Update = SHA256Update_int,
-	.Final = (void (*)(u_int8_t *, void *)) SHA256_Final,
+	.Final = (void (*)(uint8_t *, void *)) SHA256_Final,
 };

 struct auth_hash auth_hash_sha2_384 = {
 	.type = CRYPTO_SHA2_384,
 	.name = "SHA2-384",
 	.keysize = SHA2_384_BLOCK_LEN,
 	.hashsize = SHA2_384_HASH_LEN,
 	.ctxsize = sizeof(SHA384_CTX),
 	.blocksize = SHA2_384_BLOCK_LEN,
 	.Init = (void (*)(void *)) SHA384_Init,
 	.Update = SHA384Update_int,
-	.Final = (void (*)(u_int8_t *, void *)) SHA384_Final,
+	.Final = (void (*)(uint8_t *, void *)) SHA384_Final,
 };

 struct auth_hash auth_hash_sha2_512 = {
 	.type = CRYPTO_SHA2_512,
 	.name = "SHA2-512",
 	.keysize = SHA2_512_BLOCK_LEN,
 	.hashsize = SHA2_512_HASH_LEN,
 	.ctxsize = sizeof(SHA512_CTX),
 	.blocksize = SHA2_512_BLOCK_LEN,
 	.Init = (void (*)(void *)) SHA512_Init,
 	.Update = SHA512Update_int,
-	.Final = (void (*)(u_int8_t *, void *)) SHA512_Final,
+	.Final = (void (*)(uint8_t *, void *)) SHA512_Final,
 };

 /* Authentication instances */
 struct auth_hash auth_hash_hmac_sha2_224 = {
 	.type = CRYPTO_SHA2_224_HMAC,
 	.name = "HMAC-SHA2-224",
 	.keysize = SHA2_224_BLOCK_LEN,
 	.hashsize = SHA2_224_HASH_LEN,
 	.ctxsize = sizeof(SHA224_CTX),
 	.blocksize = SHA2_224_BLOCK_LEN,
 	.Init = (void (*)(void *)) SHA224_Init,
 	.Update = SHA224Update_int,
-	.Final = (void (*)(u_int8_t *, void *)) SHA224_Final,
+	.Final = (void (*)(uint8_t *, void *)) SHA224_Final,
 };

 struct auth_hash auth_hash_hmac_sha2_256 = {
 	.type = CRYPTO_SHA2_256_HMAC,
 	.name = "HMAC-SHA2-256",
 	.keysize = SHA2_256_BLOCK_LEN,
 	.hashsize = SHA2_256_HASH_LEN,
 	.ctxsize = sizeof(SHA256_CTX),
 	.blocksize = SHA2_256_BLOCK_LEN,
 	.Init = (void (*)(void *)) SHA256_Init,
 	.Update = SHA256Update_int,
-	.Final = (void (*)(u_int8_t *, void *)) SHA256_Final,
+	.Final = (void (*)(uint8_t *, void *)) SHA256_Final,
 };

 struct auth_hash auth_hash_hmac_sha2_384 = {
 	.type = CRYPTO_SHA2_384_HMAC,
 	.name = "HMAC-SHA2-384",
 	.keysize = SHA2_384_BLOCK_LEN,
 	.hashsize = SHA2_384_HASH_LEN,
 	.ctxsize = sizeof(SHA384_CTX),
 	.blocksize = SHA2_384_BLOCK_LEN,
 	.Init = (void (*)(void *)) SHA384_Init,
 	.Update = SHA384Update_int,
-	.Final = (void (*)(u_int8_t *, void *)) SHA384_Final,
+	.Final = (void (*)(uint8_t *, void *)) SHA384_Final,
 };

 struct auth_hash auth_hash_hmac_sha2_512 = {
 	.type = CRYPTO_SHA2_512_HMAC,
 	.name = "HMAC-SHA2-512",
 	.keysize = SHA2_512_BLOCK_LEN,
 	.hashsize = SHA2_512_HASH_LEN,
 	.ctxsize = sizeof(SHA512_CTX),
 	.blocksize = SHA2_512_BLOCK_LEN,
 	.Init = (void (*)(void *)) SHA512_Init,
 	.Update = SHA512Update_int,
-	.Final = (void (*)(u_int8_t *, void *)) SHA512_Final,
+	.Final = (void (*)(uint8_t *, void *)) SHA512_Final,
 };

 /*
  * And now for auth.
  */
 static int
 SHA224Update_int(void *ctx, const void *buf, u_int len)
 {
 	SHA224_Update(ctx, buf, len);
 	return 0;
 }

 static int
 SHA256Update_int(void *ctx, const void *buf, u_int len)
 {
 	SHA256_Update(ctx, buf, len);
 	return 0;
 }

 static int
 SHA384Update_int(void *ctx, const void *buf, u_int len)
 {
 	SHA384_Update(ctx, buf, len);
 	return 0;
 }

 static int
 SHA512Update_int(void *ctx, const void *buf, u_int len)
 {
 	SHA512_Update(ctx, buf, len);
 	return 0;
 }
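The (void (*)(uint8_t *, void *)) casts adapt SHA224_Final() and friends, which take a specific context type, to auth_hash's generic callback signature; the *Update_int wrappers exist only because the underlying SHA*_Update() functions return void while auth_hash.Update returns int. Calling through a cast to an incompatible function pointer type is formally undefined behavior in ISO C, even though it works on the ABIs FreeBSD supports; a cast-free alternative (illustrative only, these wrappers are not part of this commit) looks like:

/*
 * Example (editorial, not part of this diff): thin wrappers with the
 * exact callback types, sidestepping the function-pointer casts.
 */
#include <crypto/sha2/sha256.h>

static void
SHA256Init_int(void *ctx)
{
	SHA256_Init(ctx);	/* void* converts implicitly to SHA256_CTX* */
}

static void
SHA256Final_int(uint8_t *digest, void *ctx)
{
	SHA256_Final(digest, ctx);
}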