diff --git a/crypto/openssl/ssl/ktls.c b/crypto/openssl/ssl/ktls.c
--- a/crypto/openssl/ssl/ktls.c
+++ b/crypto/openssl/ssl/ktls.c
@@ -10,6 +10,67 @@
 #include "ssl_local.h"
 #include "internal/ktls.h"
 
+#ifndef OPENSSL_NO_KTLS_RX
+ /*
+  * Count the number of records that were not processed yet from record boundary.
+  *
+  * This function assumes that there are only fully formed records read in the
+  * record layer. If read_ahead is enabled, then this might be false and this
+  * function will fail.
+  */
+static int count_unprocessed_records(SSL *s)
+{
+    SSL3_BUFFER *rbuf = RECORD_LAYER_get_rbuf(&s->rlayer);
+    PACKET pkt, subpkt;
+    int count = 0;
+
+    if (!PACKET_buf_init(&pkt, rbuf->buf + rbuf->offset, rbuf->left))
+        return -1;
+
+    while (PACKET_remaining(&pkt) > 0) {
+        /* Skip record type and version */
+        if (!PACKET_forward(&pkt, 3))
+            return -1;
+
+        /* Read until next record */
+        if (!PACKET_get_length_prefixed_2(&pkt, &subpkt))
+            return -1;
+
+        count += 1;
+    }
+
+    return count;
+}
+
+/*
+ * The kernel cannot offload receive if a partial TLS record has been read.
+ * Check the read buffer for unprocessed records. If the buffer contains a
+ * partial record, fail and return 0. Otherwise, update the sequence
+ * number at *rec_seq for the count of unprocessed records and return 1.
+ */
+static int check_rx_read_ahead(SSL *s, unsigned char *rec_seq)
+{
+    int bit, count_unprocessed;
+
+    count_unprocessed = count_unprocessed_records(s);
+    if (count_unprocessed < 0)
+        return 0;
+
+    /* increment the crypto_info record sequence */
+    while (count_unprocessed) {
+        for (bit = 7; bit >= 0; bit--) { /* increment */
+            ++rec_seq[bit];
+            if (rec_seq[bit] != 0)
+                break;
+        }
+        count_unprocessed--;
+
+    }
+
+    return 1;
+}
+#endif
+
 #if defined(__FreeBSD__)
 # include 
 
@@ -59,9 +120,9 @@
 }
 
 /* Function to configure kernel TLS structure */
-int ktls_configure_crypto(const SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
+int ktls_configure_crypto(SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
                           void *rl_sequence, ktls_crypto_info_t *crypto_info,
-                          unsigned char **rec_seq, unsigned char *iv,
+                          int is_tx, unsigned char *iv,
                           unsigned char *key, unsigned char *mac_key,
                           size_t mac_secret_size)
 {
@@ -111,11 +172,11 @@
     crypto_info->tls_vminor = (s->version & 0x000000ff);
 # ifdef TCP_RXTLS_ENABLE
     memcpy(crypto_info->rec_seq, rl_sequence, sizeof(crypto_info->rec_seq));
-    if (rec_seq != NULL)
-        *rec_seq = crypto_info->rec_seq;
+    if (!is_tx && !check_rx_read_ahead(s, crypto_info->rec_seq))
+        return 0;
 # else
-    if (rec_seq != NULL)
-        *rec_seq = NULL;
+    if (!is_tx)
+        return 0;
 # endif
     return 1;
 };
@@ -163,15 +224,20 @@
 }
 
 /* Function to configure kernel TLS structure */
-int ktls_configure_crypto(const SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
+int ktls_configure_crypto(SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
                           void *rl_sequence, ktls_crypto_info_t *crypto_info,
-                          unsigned char **rec_seq, unsigned char *iv,
+                          int is_tx, unsigned char *iv,
                           unsigned char *key, unsigned char *mac_key,
                           size_t mac_secret_size)
 {
     unsigned char geniv[12];
     unsigned char *iiv = iv;
 
+# ifdef OPENSSL_NO_KTLS_RX
+    if (!is_tx)
+        return 0;
+# endif
+
     if (s->version == TLS1_2_VERSION
         && EVP_CIPHER_mode(c) == EVP_CIPH_GCM_MODE) {
         EVP_CIPHER_CTX_ctrl(dd, EVP_CTRL_GET_IV,
@@ -194,8 +260,8 @@
         memcpy(crypto_info->gcm128.key, key, EVP_CIPHER_key_length(c));
         memcpy(crypto_info->gcm128.rec_seq, rl_sequence,
                TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
-        if (rec_seq != NULL)
-            *rec_seq = crypto_info->gcm128.rec_seq;
+        if (!is_tx && !check_rx_read_ahead(s, crypto_info->gcm128.rec_seq))
+            return 0;
         return 1;
 # endif
 # ifdef OPENSSL_KTLS_AES_GCM_256
@@ -209,8 +275,8 @@
         memcpy(crypto_info->gcm256.key, key, EVP_CIPHER_key_length(c));
         memcpy(crypto_info->gcm256.rec_seq, rl_sequence,
                TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
-        if (rec_seq != NULL)
-            *rec_seq = crypto_info->gcm256.rec_seq;
+        if (!is_tx && !check_rx_read_ahead(s, crypto_info->gcm256.rec_seq))
+            return 0;
         return 1;
 # endif
 # ifdef OPENSSL_KTLS_AES_CCM_128
@@ -224,8 +290,8 @@
         memcpy(crypto_info->ccm128.key, key, EVP_CIPHER_key_length(c));
         memcpy(crypto_info->ccm128.rec_seq, rl_sequence,
                TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
-        if (rec_seq != NULL)
-            *rec_seq = crypto_info->ccm128.rec_seq;
+        if (!is_tx && !check_rx_read_ahead(s, crypto_info->ccm128.rec_seq))
+            return 0;
         return 1;
 # endif
 # ifdef OPENSSL_KTLS_CHACHA20_POLY1305
@@ -238,8 +304,10 @@
         memcpy(crypto_info->chacha20poly1305.key, key, EVP_CIPHER_key_length(c));
         memcpy(crypto_info->chacha20poly1305.rec_seq, rl_sequence,
                TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
-        if (rec_seq != NULL)
-            *rec_seq = crypto_info->chacha20poly1305.rec_seq;
+        if (!is_tx
+            && !check_rx_read_ahead(s,
+                                    crypto_info->chacha20poly1305.rec_seq))
+            return 0;
         return 1;
 # endif
     default:
diff --git a/crypto/openssl/ssl/ssl_local.h b/crypto/openssl/ssl/ssl_local.h
--- a/crypto/openssl/ssl/ssl_local.h
+++ b/crypto/openssl/ssl/ssl_local.h
@@ -2623,9 +2623,9 @@
 /* ktls.c */
 int ktls_check_supported_cipher(const SSL *s, const EVP_CIPHER *c,
                                 const EVP_CIPHER_CTX *dd);
-int ktls_configure_crypto(const SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
+int ktls_configure_crypto(SSL *s, const EVP_CIPHER *c, EVP_CIPHER_CTX *dd,
                           void *rl_sequence, ktls_crypto_info_t *crypto_info,
-                          unsigned char **rec_seq, unsigned char *iv,
+                          int is_tx, unsigned char *iv,
                           unsigned char *key, unsigned char *mac_key,
                           size_t mac_secret_size);
 # endif
diff --git a/crypto/openssl/ssl/t1_enc.c b/crypto/openssl/ssl/t1_enc.c
--- a/crypto/openssl/ssl/t1_enc.c
+++ b/crypto/openssl/ssl/t1_enc.c
@@ -82,41 +82,6 @@
     return ret;
 }
 
-#ifndef OPENSSL_NO_KTLS
- /*
-  * Count the number of records that were not processed yet from record boundary.
-  *
-  * This function assumes that there are only fully formed records read in the
-  * record layer. If read_ahead is enabled, then this might be false and this
-  * function will fail.
-  */
-# ifndef OPENSSL_NO_KTLS_RX
-static int count_unprocessed_records(SSL *s)
-{
-    SSL3_BUFFER *rbuf = RECORD_LAYER_get_rbuf(&s->rlayer);
-    PACKET pkt, subpkt;
-    int count = 0;
-
-    if (!PACKET_buf_init(&pkt, rbuf->buf + rbuf->offset, rbuf->left))
-        return -1;
-
-    while (PACKET_remaining(&pkt) > 0) {
-        /* Skip record type and version */
-        if (!PACKET_forward(&pkt, 3))
-            return -1;
-
-        /* Read until next record */
-        if (!PACKET_get_length_prefixed_2(&pkt, &subpkt))
-            return -1;
-
-        count += 1;
-    }
-
-    return count;
-}
-# endif
-#endif
-
 int tls1_change_cipher_state(SSL *s, int which)
 {
     unsigned char *p, *mac_secret;
@@ -135,12 +100,7 @@
     int reuse_dd = 0;
 #ifndef OPENSSL_NO_KTLS
     ktls_crypto_info_t crypto_info;
-    unsigned char *rec_seq;
     void *rl_sequence;
-# ifndef OPENSSL_NO_KTLS_RX
-    int count_unprocessed;
-    int bit;
-# endif
     BIO *bio;
 #endif
 
@@ -403,30 +363,11 @@
     else
         rl_sequence = RECORD_LAYER_get_read_sequence(&s->rlayer);
 
-    if (!ktls_configure_crypto(s, c, dd, rl_sequence, &crypto_info, &rec_seq,
-                               iv, key, ms, *mac_secret_size))
+    if (!ktls_configure_crypto(s, c, dd, rl_sequence, &crypto_info,
+                               which & SSL3_CC_WRITE, iv, key, ms,
+                               *mac_secret_size))
         goto skip_ktls;
 
-    if (which & SSL3_CC_READ) {
-# ifndef OPENSSL_NO_KTLS_RX
-        count_unprocessed = count_unprocessed_records(s);
-        if (count_unprocessed < 0)
-            goto skip_ktls;
-
-        /* increment the crypto_info record sequence */
-        while (count_unprocessed) {
-            for (bit = 7; bit >= 0; bit--) { /* increment */
-                ++rec_seq[bit];
-                if (rec_seq[bit] != 0)
-                    break;
-            }
-            count_unprocessed--;
-        }
-# else
-        goto skip_ktls;
-# endif
-    }
-
     /* ktls works with user provided buffers directly */
     if (BIO_set_ktls(bio, &crypto_info, which & SSL3_CC_WRITE)) {
         if (which & SSL3_CC_WRITE)
diff --git a/crypto/openssl/ssl/tls13_enc.c b/crypto/openssl/ssl/tls13_enc.c
--- a/crypto/openssl/ssl/tls13_enc.c
+++ b/crypto/openssl/ssl/tls13_enc.c
@@ -756,7 +756,8 @@
     /* configure kernel crypto structure */
     if (!ktls_configure_crypto(s, cipher, ciph_ctx,
                                RECORD_LAYER_get_write_sequence(&s->rlayer),
-                               &crypto_info, NULL, iv, key, NULL, 0))
+                               &crypto_info, which & SSL3_CC_WRITE, iv, key,
+                               NULL, 0))
         goto skip_ktls;
 
     /* ktls works with user provided buffers directly */
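
Note (not part of the patch): the sequence-number adjustment that check_rx_read_ahead() performs may be easier to follow in isolation. The minimal standalone sketch below mirrors that loop: the TLS record sequence number is an 8-byte big-endian counter, and each fully buffered but unprocessed record advances it by one, carrying from the last byte toward the first. The helper name bump_rec_seq and the test harness are illustrative only; they do not exist in OpenSSL.

#include <stdio.h>

/*
 * Illustrative sketch: same increment-with-carry loop as the patch, applied
 * to an 8-byte big-endian TLS record sequence number.
 */
static void bump_rec_seq(unsigned char rec_seq[8], int count_unprocessed)
{
    int bit;    /* byte index; named to match the loop in the patch */

    while (count_unprocessed) {
        for (bit = 7; bit >= 0; bit--) {
            ++rec_seq[bit];
            if (rec_seq[bit] != 0)      /* stop once there is no carry */
                break;
        }
        count_unprocessed--;
    }
}

int main(void)
{
    /* 0x00000000000000ff plus one unprocessed record carries into byte 6. */
    unsigned char rec_seq[8] = { 0, 0, 0, 0, 0, 0, 0, 0xff };
    int i;

    bump_rec_seq(rec_seq, 1);

    for (i = 0; i < 8; i++)
        printf("%02x", rec_seq[i]);
    printf("\n");                       /* prints 0000000000000100 */

    return 0;
}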