diff --git a/sys/crypto/aesni/aesni.c b/sys/crypto/aesni/aesni.c
--- a/sys/crypto/aesni/aesni.c
+++ b/sys/crypto/aesni/aesni.c
@@ -855,6 +855,9 @@
 	const uint8_t *key;
 	int i, keylen;
 
+	if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+		return (0);
+
 	if (crp->crp_auth_key != NULL)
 		key = crp->crp_auth_key;
 	else
diff --git a/sys/crypto/blake2/blake2_cryptodev.c b/sys/crypto/blake2/blake2_cryptodev.c
--- a/sys/crypto/blake2/blake2_cryptodev.c
+++ b/sys/crypto/blake2/blake2_cryptodev.c
@@ -342,6 +342,9 @@
 	int error, rc;
 	unsigned klen;
 
+	if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+		return (0);
+
 	ctx = NULL;
 	ctxidx = 0;
 	error = EINVAL;
diff --git a/sys/crypto/ccp/ccp_hardware.c b/sys/crypto/ccp/ccp_hardware.c
--- a/sys/crypto/ccp/ccp_hardware.c
+++ b/sys/crypto/ccp/ccp_hardware.c
@@ -1212,7 +1212,7 @@
 
 	s->pending--;
 
-	if (error != 0) {
+	if (error != 0 || (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) != 0) {
 		crp->crp_etype = error;
 		goto out;
 	}
diff --git a/sys/crypto/openssl/ossl.c b/sys/crypto/openssl/ossl.c
--- a/sys/crypto/openssl/ossl.c
+++ b/sys/crypto/openssl/ossl.c
@@ -238,6 +238,9 @@
 	struct auth_hash *axf;
 	int error;
 
+	if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+		return (0);
+
 	axf = s->hash.axf;
 
 	if (crp->crp_auth_key == NULL) {
diff --git a/sys/crypto/via/padlock_hash.c b/sys/crypto/via/padlock_hash.c
--- a/sys/crypto/via/padlock_hash.c
+++ b/sys/crypto/via/padlock_hash.c
@@ -286,6 +286,9 @@
 	union authctx ctx;
 	int error;
 
+	if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+		return (0);
+
 	axf = ses->ses_axf;
 	padlock_copy_ctx(axf, ses->ses_ictx, &ctx);
diff --git a/sys/dev/cesa/cesa.c b/sys/dev/cesa/cesa.c
--- a/sys/dev/cesa/cesa.c
+++ b/sys/dev/cesa/cesa.c
@@ -1528,7 +1528,8 @@
 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 
 		cr->cr_crp->crp_etype = sc->sc_error;
-		if (cr->cr_cs->cs_hlen != 0 && cr->cr_crp->crp_etype == 0) {
+		if (cr->cr_cs->cs_hlen != 0 && cr->cr_crp->crp_etype == 0 &&
+		    (cr->cr_crp->crp_op & CRYPTO_OP_SKIP_DIGEST) == 0) {
 			if (cr->cr_crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
 				crypto_copydata(cr->cr_crp,
 				    cr->cr_crp->crp_digest_start,
diff --git a/sys/dev/cxgbe/crypto/t4_crypto.c b/sys/dev/cxgbe/crypto/t4_crypto.c
--- a/sys/dev/cxgbe/crypto/t4_crypto.c
+++ b/sys/dev/cxgbe/crypto/t4_crypto.c
@@ -595,7 +595,7 @@
 {
 	uint8_t hash[HASH_MAX_LEN];
 
-	if (error)
+	if (error || (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) != 0)
 		return (error);
 
 	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
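The driver hunks above all implement the same contract for the new
CRYPTO_OP_SKIP_DIGEST flag (defined in sys/opencrypto/cryptodev.h later in
this patch): perform any requested cipher work, but do not compute, copy
out, or verify a digest.  A minimal sketch of a consumer request using the
flag follows; it is illustrative only, and the session 'ses', the callback
'done_cb', and the payload layout are assumed to be set up elsewhere.

/*
 * Hypothetical sketch: decrypt an mbuf chain on an existing OCF
 * session while skipping all digest processing.  Only the crp_op
 * usage reflects this patch; everything else is placeholder.
 */
static int
decrypt_no_digest(crypto_session_t ses, struct mbuf *m, int payload_len,
    int (*done_cb)(struct cryptop *))
{
	struct cryptop *crp;

	crp = crypto_getreq(ses, M_WAITOK);
	crypto_use_mbuf(crp, m);
	crp->crp_payload_start = 0;
	crp->crp_payload_length = payload_len;
	/* Decrypt, but neither compute nor verify the digest. */
	crp->crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_SKIP_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIMM;
	crp->crp_callback = done_cb;
	return (crypto_dispatch(crp));
}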
diff --git a/sys/dev/cxgbe/offload.h b/sys/dev/cxgbe/offload.h
--- a/sys/dev/cxgbe/offload.h
+++ b/sys/dev/cxgbe/offload.h
@@ -225,9 +225,6 @@
 	int ddp;
 	int rx_coalesce;
 	int tls;
-	int tls_rx_timeout;
-	int *tls_rx_ports;
-	int num_tls_rx_ports;
 	int tx_align;
 	int tx_zcopy;
 	int cop_managed_offloading;
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -414,11 +414,6 @@
     &t4_toe_rexmt_backoff[14], 0, "");
 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 15, CTLFLAG_RDTUN,
     &t4_toe_rexmt_backoff[15], 0, "");
-
-static int t4_toe_tls_rx_timeout = 5;
-SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, tls_rx_timeout, CTLFLAG_RDTUN,
-    &t4_toe_tls_rx_timeout, 0,
-    "Timeout in seconds to downgrade TLS sockets to plain TOE");
 #endif
 
 #ifdef DEV_NETMAP
@@ -812,8 +807,6 @@
 static int sysctl_reset(SYSCTL_HANDLER_ARGS);
 #ifdef TCP_OFFLOAD
 static int sysctl_tls(SYSCTL_HANDLER_ARGS);
-static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS);
-static int sysctl_tls_rx_timeout(SYSCTL_HANDLER_ARGS);
 static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
 static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
 static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
@@ -1767,7 +1760,6 @@
 	free(sc->tids.hpftid_tab, M_CXGBE);
 	free_hftid_hash(&sc->tids);
 	free(sc->tids.tid_tab, M_CXGBE);
-	free(sc->tt.tls_rx_ports, M_CXGBE);
 	t4_destroy_dma_tag(sc);
 
 	callout_drain(&sc->ktls_tick);
@@ -5539,10 +5531,9 @@
 	if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS &&
 	    sc->toecaps & FW_CAPS_CONFIG_TOE) {
 		/*
-		 * Limit TOE connections to 2 reassembly "islands".  This is
-		 * required for TOE TLS connections to downgrade to plain TOE
-		 * connections if an unsupported TLS version or ciphersuite is
-		 * used.
+		 * Limit TOE connections to 2 reassembly "islands".
+		 * This is required to permit migrating TOE
+		 * connections to ULP_MODE_TLS.
 		 */
 		t4_tp_wr_bits_indirect(sc, A_TP_FRAG_CONFIG,
 		    V_PASSMODE(M_PASSMODE), V_PASSMODE(2));
@@ -7492,17 +7483,6 @@
 		    CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tls, "I",
 		    "Inline TLS allowed");
 
-		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_ports",
-		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
-		    sysctl_tls_rx_ports, "I",
-		    "TCP ports that use inline TLS+TOE RX");
-
-		sc->tt.tls_rx_timeout = t4_toe_tls_rx_timeout;
-		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_timeout",
-		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
-		    sysctl_tls_rx_timeout, "I",
-		    "Timeout in seconds to downgrade TLS sockets to plain TOE");
-
 		sc->tt.tx_align = -1;
 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", CTLFLAG_RW,
 		    &sc->tt.tx_align, 0, "chop and align payload");
@@ -11004,97 +10984,6 @@
 }
 
-static int
-sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS)
-{
-	struct adapter *sc = arg1;
-	int *old_ports, *new_ports;
-	int i, new_count, rc;
-
-	if (req->newptr == NULL && req->oldptr == NULL)
-		return (SYSCTL_OUT(req, NULL, imax(sc->tt.num_tls_rx_ports, 1) *
-		    sizeof(sc->tt.tls_rx_ports[0])));
-
-	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tlsrx");
-	if (rc)
-		return (rc);
-
-	if (hw_off_limits(sc)) {
-		rc = ENXIO;
-		goto done;
-	}
-
-	if (sc->tt.num_tls_rx_ports == 0) {
-		i = -1;
-		rc = SYSCTL_OUT(req, &i, sizeof(i));
-	} else
-		rc = SYSCTL_OUT(req, sc->tt.tls_rx_ports,
-		    sc->tt.num_tls_rx_ports * sizeof(sc->tt.tls_rx_ports[0]));
-	if (rc == 0 && req->newptr != NULL) {
-		new_count = req->newlen / sizeof(new_ports[0]);
-		new_ports = malloc(new_count * sizeof(new_ports[0]), M_CXGBE,
-		    M_WAITOK);
-		rc = SYSCTL_IN(req, new_ports, new_count *
-		    sizeof(new_ports[0]));
-		if (rc)
-			goto err;
-
-		/* Allow setting to a single '-1' to clear the list. */
-		if (new_count == 1 && new_ports[0] == -1) {
-			ADAPTER_LOCK(sc);
-			old_ports = sc->tt.tls_rx_ports;
-			sc->tt.tls_rx_ports = NULL;
-			sc->tt.num_tls_rx_ports = 0;
-			ADAPTER_UNLOCK(sc);
-			free(old_ports, M_CXGBE);
-		} else {
-			for (i = 0; i < new_count; i++) {
-				if (new_ports[i] < 1 ||
-				    new_ports[i] > IPPORT_MAX) {
-					rc = EINVAL;
-					goto err;
-				}
-			}
-
-			ADAPTER_LOCK(sc);
-			old_ports = sc->tt.tls_rx_ports;
-			sc->tt.tls_rx_ports = new_ports;
-			sc->tt.num_tls_rx_ports = new_count;
-			ADAPTER_UNLOCK(sc);
-			free(old_ports, M_CXGBE);
-			new_ports = NULL;
-		}
-	err:
-		free(new_ports, M_CXGBE);
-	}
-done:
-	end_synchronized_op(sc, 0);
-	return (rc);
-}
-
-static int
-sysctl_tls_rx_timeout(SYSCTL_HANDLER_ARGS)
-{
-	struct adapter *sc = arg1;
-	int v, rc;
-
-	v = sc->tt.tls_rx_timeout;
-	rc = sysctl_handle_int(oidp, &v, 0, req);
-	if (rc != 0 || req->newptr == NULL)
-		return (rc);
-
-	if (v < 0)
-		return (EINVAL);
-
-	if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS))
-		return (ENOTSUP);
-
-	sc->tt.tls_rx_timeout = v;
-
-	return (0);
-
-}
-
 static void
 unit_conv(char *buf, size_t len, u_int val, u_int factor)
 {
@@ -12557,9 +12446,6 @@
 
 	if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS)
 		t4_pktc_idx_ofld = PKTC_IDX_OFLD;
-
-	if (t4_toe_tls_rx_timeout < 0)
-		t4_toe_tls_rx_timeout = 0;
 #else
 	if (t4_rdmacaps_allowed == -1)
 		t4_rdmacaps_allowed = 0;
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -98,8 +98,7 @@
 		nparams = 8;
 	else
 		nparams = 6;
-	if (ulp_mode(toep) == ULP_MODE_TLS)
-		nparams++;
+	/* XXX: I don't think this is ever set yet? */
 	if (toep->tls.fcplenmax != 0)
 		nparams++;
 	if (toep->params.tc_idx != -1) {
@@ -148,8 +147,6 @@
 	    __func__, toep->tid, toep->params.emss, toep->params.sndbuf,
 	    tp ? tp->snd_nxt : 0, tp ? tp->rcv_nxt : 0);
 
-	if (ulp_mode(toep) == ULP_MODE_TLS)
-		FLOWC_PARAM(ULP_MODE, ulp_mode(toep));
 	if (toep->tls.fcplenmax != 0)
 		FLOWC_PARAM(TXDATAPLEN_MAX, toep->tls.fcplenmax);
 	if (toep->params.tc_idx != -1)
@@ -395,9 +392,6 @@
 		send_flowc_wr(toep, tp);
 
 	soisconnected(so);
-
-	if (ulp_mode(toep) == ULP_MODE_TLS)
-		tls_establish(toep);
 }
 
 int
@@ -421,23 +415,6 @@
 	return (credits);
 }
 
-void
-send_rx_modulate(struct adapter *sc, struct toepcb *toep)
-{
-	struct wrqe *wr;
-	struct cpl_rx_data_ack *req;
-
-	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
-	if (wr == NULL)
-		return;
-	req = wrtod(wr);
-
-	INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid);
-	req->credit_dack = htobe32(F_RX_MODULATE_RX);
-
-	t4_wrq_tx(sc, wr);
-}
-
 void
 t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp)
 {
@@ -459,8 +436,7 @@
 		rx_credits = send_rx_credits(sc, toep, rx_credits);
 		tp->rcv_wnd += rx_credits;
 		tp->rcv_adv += rx_credits;
-	} else if (toep->flags & TPF_FORCE_CREDITS)
-		send_rx_modulate(sc, toep);
+	}
 }
 
@@ -1814,6 +1790,8 @@
 		    tid);
 		ddp_queue_toep(toep);
 	}
+	if (ulp_mode(toep) == ULP_MODE_TLS)
+		tls_received_starting_data(sc, toep, sb, len);
 	sorwakeup_locked(so);
 	SOCKBUF_UNLOCK_ASSERT(sb);
 	if (ulp_mode(toep) == ULP_MODE_TCPDDP)
diff --git a/sys/dev/cxgbe/tom/t4_tls.h b/sys/dev/cxgbe/tom/t4_tls.h
--- a/sys/dev/cxgbe/tom/t4_tls.h
+++ b/sys/dev/cxgbe/tom/t4_tls.h
@@ -35,11 +35,6 @@
 
 #ifdef _KERNEL
 
-/* Timeouts for handshake timer in seconds. */
-#define TLS_SRV_HELLO_DONE		9
-#define TLS_SRV_HELLO_RD_TM		5
-#define TLS_SRV_HELLO_BKOFF_TM		15
-
 #define CONTENT_TYPE_CCS		20
 #define CONTENT_TYPE_ALERT		21
 #define CONTENT_TYPE_HANDSHAKE		22
@@ -85,7 +80,7 @@
 	struct tls_scmd scmd0;
 	u_int iv_len;
 	unsigned int tx_key_info_size;
-	struct callout handshake_timer;
+	size_t rx_resid;
 };
 
 struct tls_hdr {
diff --git a/sys/dev/cxgbe/tom/t4_tls.c b/sys/dev/cxgbe/tom/t4_tls.c
--- a/sys/dev/cxgbe/tom/t4_tls.c
+++ b/sys/dev/cxgbe/tom/t4_tls.c
@@ -104,27 +104,6 @@
 	t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0);
 }
 
-static void
-tls_clr_ofld_mode(struct toepcb *toep)
-{
-
-	tls_stop_handshake_timer(toep);
-
-	KASSERT(toep->tls.rx_key_addr == -1,
-	    ("%s: tid %d has RX key", __func__, toep->tid));
-
-	/* Switch to plain TOE mode. */
-	t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
-	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)),
-	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(0)));
-	t4_set_tls_tcb_field(toep, W_TCB_ULP_TYPE,
-	    V_TCB_ULP_TYPE(M_TCB_ULP_TYPE), V_TCB_ULP_TYPE(ULP_MODE_NONE));
-	t4_clear_rx_quiesce(toep);
-
-	toep->flags &= ~(TPF_FORCE_CREDITS | TPF_TLS_ESTABLISHED);
-	toep->params.ulp_mode = ULP_MODE_NONE;
-}
-
 /* TLS/DTLS content type for CPL SFO */
 static inline unsigned char
 tls_content_type(unsigned char content_type)
@@ -230,88 +209,29 @@
 	return (0);
 }
 
-/*
- * In some cases a client connection can hang without sending the
- * ServerHelloDone message from the NIC to the host.  Send a dummy
- * RX_DATA_ACK with RX_MODULATE to unstick the connection.
- */
-static void
-tls_send_handshake_ack(void *arg)
-{
-	struct toepcb *toep = arg;
-	struct tls_ofld_info *tls_ofld = &toep->tls;
-	struct adapter *sc = td_adapter(toep->td);
-
-	/* Bail without rescheduling if the connection has closed. */
-	if ((toep->flags & (TPF_FIN_SENT | TPF_ABORT_SHUTDOWN)) != 0)
-		return;
-
-	/*
-	 * If this connection has timed out without receiving more
-	 * data, downgrade to plain TOE mode and don't re-arm the
-	 * timer.
-	 */
-	if (sc->tt.tls_rx_timeout != 0) {
-		struct inpcb *inp;
-		struct tcpcb *tp;
-
-		inp = toep->inp;
-		tp = intotcpcb(inp);
-		if ((ticks - tp->t_rcvtime) >= sc->tt.tls_rx_timeout) {
-			CTR2(KTR_CXGBE, "%s: tid %d clr_ofld_mode", __func__,
-			    toep->tid);
-			tls_clr_ofld_mode(toep);
-			return;
-		}
-	}
-
-	/*
-	 * XXX: Does not have the t4_get_tcb() checks to refine the
-	 * workaround.
-	 */
-	callout_schedule(&tls_ofld->handshake_timer, TLS_SRV_HELLO_RD_TM * hz);
-
-	CTR2(KTR_CXGBE, "%s: tid %d sending RX_DATA_ACK", __func__, toep->tid);
-	send_rx_modulate(sc, toep);
-}
-
-static void
-tls_start_handshake_timer(struct toepcb *toep)
-{
-	struct tls_ofld_info *tls_ofld = &toep->tls;
-
-	INP_WLOCK_ASSERT(toep->inp);
-	callout_reset(&tls_ofld->handshake_timer, TLS_SRV_HELLO_BKOFF_TM * hz,
-	    tls_send_handshake_ack, toep);
-}
-
-void
-tls_stop_handshake_timer(struct toepcb *toep)
-{
-	struct tls_ofld_info *tls_ofld = &toep->tls;
-
-	INP_WLOCK_ASSERT(toep->inp);
-	callout_stop(&tls_ofld->handshake_timer);
-}
-
 int
 tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
 {
 	struct adapter *sc = td_adapter(toep->td);
-	int error, explicit_iv_size, key_offset, mac_first;
+	int error, explicit_iv_size, mac_first;
 
-	if (!can_tls_offload(td_adapter(toep->td)))
+	if (!can_tls_offload(sc))
 		return (EINVAL);
-	switch (ulp_mode(toep)) {
-	case ULP_MODE_TLS:
-		break;
-	case ULP_MODE_NONE:
-	case ULP_MODE_TCPDDP:
-		if (direction != KTLS_TX)
+
+	if (direction == KTLS_RX) {
+		if (ulp_mode(toep) != ULP_MODE_NONE)
 			return (EINVAL);
-		break;
-	default:
-		return (EINVAL);
+		if ((toep->flags & TPF_TLS_STARTING) != 0)
+			return (EINVAL);
+	} else {
+		switch (ulp_mode(toep)) {
+		case ULP_MODE_NONE:
+		case ULP_MODE_TLS:
+		case ULP_MODE_TCPDDP:
+			break;
+		default:
+			return (EINVAL);
+		}
 	}
 
 	switch (tls->params.cipher_algorithm) {
@@ -323,8 +243,7 @@
 		case 256 / 8:
 			break;
 		default:
-			error = EINVAL;
-			goto clr_ofld;
+			return (EINVAL);
 		}
 		switch (tls->params.auth_algorithm) {
 		case CRYPTO_SHA1_HMAC:
@@ -332,16 +251,14 @@
 		case CRYPTO_SHA2_384_HMAC:
 			break;
 		default:
-			error = EPROTONOSUPPORT;
-			goto clr_ofld;
+			return (EPROTONOSUPPORT);
 		}
 		explicit_iv_size = AES_BLOCK_LEN;
 		mac_first = 1;
 		break;
 	case CRYPTO_AES_NIST_GCM_16:
 		if (tls->params.iv_len != SALT_SIZE) {
-			error = EINVAL;
-			goto clr_ofld;
+			return (EINVAL);
 		}
 		switch (tls->params.cipher_key_len) {
 		case 128 / 8:
@@ -349,23 +266,20 @@
 		case 256 / 8:
 			break;
 		default:
-			error = EINVAL;
-			goto clr_ofld;
+			return (EINVAL);
 		}
 		explicit_iv_size = 8;
 		mac_first = 0;
 		break;
 	default:
-		error = EPROTONOSUPPORT;
-		goto clr_ofld;
+		return (EPROTONOSUPPORT);
 	}
 
 	/* Only TLS 1.1 and TLS 1.2 are currently supported. */
 	if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
 	    tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
 	    tls->params.tls_vminor > TLS_MINOR_VER_TWO) {
-		error = EPROTONOSUPPORT;
-		goto clr_ofld;
+		return (EPROTONOSUPPORT);
 	}
 
 	/* Bail if we already have a key. */
@@ -378,11 +292,8 @@
 	}
 
 	error = tls_program_key_id(toep, tls, direction);
-	if (error) {
-		if (direction == KTLS_RX)
-			goto clr_ofld;
+	if (error)
 		return (error);
-	}
 
 	if (direction == KTLS_TX) {
 		toep->tls.scmd0.seqno_numivs =
@@ -414,42 +325,17 @@
 		    tls->params.max_frame_len;
 		toep->tls.tx_key_info_size = t4_tls_key_info_size(tls);
 	} else {
-		/* Stop timer on handshake completion */
-		tls_stop_handshake_timer(toep);
-
-		toep->flags &= ~TPF_FORCE_CREDITS;
-		toep->flags |= TPF_TLS_RECEIVE;
+		toep->flags |= TPF_TLS_STARTING;
 		toep->tls.rx_version = tls->params.tls_vmajor << 8 |
 		    tls->params.tls_vminor;
 
-		/*
-		 * RX key tags are an index into the key portion of MA
-		 * memory stored as an offset from the base address in
-		 * units of 64 bytes.
-		 */
-		key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
-		t4_set_tls_keyid(toep, key_offset / 64);
-		t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
-		    V_TCB_ULP_RAW(M_TCB_ULP_RAW),
-		    V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) |
-		    V_TF_TLS_CONTROL(1) |
-		    V_TF_TLS_ACTIVE(1) |
-		    V_TF_TLS_ENABLE(1))));
-		t4_set_tls_tcb_field(toep, W_TCB_TLS_SEQ,
-		    V_TCB_TLS_SEQ(M_TCB_TLS_SEQ),
-		    V_TCB_TLS_SEQ(0));
-		t4_clear_rx_quiesce(toep);
+		CTR2(KTR_CXGBE, "%s: tid %d setting RX_QUIESCE", __func__,
+		    toep->tid);
+		t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, W_TCB_T_FLAGS,
+		    V_TF_RX_QUIESCE(1), V_TF_RX_QUIESCE(1), 1, CPL_COOKIE_TOM);
 	}
 
 	return (0);
-
-clr_ofld:
-	if (ulp_mode(toep) == ULP_MODE_TLS) {
-		CTR2(KTR_CXGBE, "%s: tid %d clr_ofld_mode", __func__,
-		    toep->tid);
-		tls_clr_ofld_mode(toep);
-	}
-	return (error);
 }
 
 void
@@ -461,42 +347,10 @@
 	tls_ofld->tx_key_addr = -1;
 }
 
-void
-tls_establish(struct toepcb *toep)
-{
-
-	/*
-	 * Enable PDU extraction.
-	 *
-	 * XXX: Supposedly this should be done by the firmware when
-	 * the ULP_MODE FLOWC parameter is set in send_flowc_wr(), but
-	 * in practice this seems to be required.
-	 */
-	CTR2(KTR_CXGBE, "%s: tid %d setting TLS_ENABLE", __func__, toep->tid);
-	t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
-	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));
-
-	toep->flags |= TPF_FORCE_CREDITS | TPF_TLS_ESTABLISHED;
-
-	callout_init_rw(&toep->tls.handshake_timer, &toep->inp->inp_lock, 0);
-	tls_start_handshake_timer(toep);
-}
-
-void
-tls_detach(struct toepcb *toep)
-{
-
-	if (toep->flags & TPF_TLS_ESTABLISHED) {
-		tls_stop_handshake_timer(toep);
-		toep->flags &= ~TPF_TLS_ESTABLISHED;
-	}
-}
-
 void
 tls_uninit_toep(struct toepcb *toep)
 {
 
-	MPASS((toep->flags & TPF_TLS_ESTABLISHED) == 0);
 	clear_tls_keyid(toep);
 }
@@ -1015,6 +869,9 @@
 
 	/* Report decryption errors as EBADMSG. */
 	if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0) {
+		CTR4(KTR_CXGBE, "%s: tid %u TLS error %#x ddp_vld %#x",
+		    __func__, toep->tid, tls_hdr_pkt->res_to_mac_error,
+		    be32toh(cpl->ddp_valid));
 		m_freem(m);
 		m_freem(tls_data);
@@ -1232,12 +1089,183 @@
 	m_freem(m);
 }
 
+/*
+ * Finish converting a connection to ULP_MODE_TLS and enable TLS
+ * decryption.
+ */
+static void
+finish_tls_rx_enable(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
+{
+	int key_offset;
+
+	CTR3(KTR_CXGBE, "%s: tid %d seqno %lu", __func__, toep->tid, seqno);
+
+	/*
+	 * RX key tags are an index into the key portion of MA memory
+	 * stored as an offset from the base address in units of 64
+	 * bytes.
+	 */
+	key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
+	t4_set_tls_keyid(toep, key_offset / 64);
+	t4_set_tls_tcb_field(toep, W_TCB_TLS_SEQ, V_TCB_TLS_SEQ(M_TCB_TLS_SEQ),
+	    V_TCB_TLS_SEQ(seqno));
+
+	/* XXX: Just set ACTIVE? */
+	t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
+	    V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) | V_TF_TLS_CONTROL(1) |
+	    V_TF_TLS_ACTIVE(1) | V_TF_TLS_ENABLE(1))));
+	t4_clear_rx_quiesce(toep);
+
+	toep->flags &= ~TPF_TLS_STARTING;
+	toep->flags |= TPF_TLS_RECEIVE;
+}
+
+/*
+ * Examine the pending data in the socket buffer and either finish
+ * enabling TLS RX or request more unencrypted data.
+ */ +static void +tls_check_rx_sockbuf(struct adapter *sc, struct toepcb *toep, + struct sockbuf *sb) +{ + uint64_t seqno; + size_t resid; + bool have_header; + + SOCKBUF_LOCK_ASSERT(sb); + MPASS(toep->tls.rx_resid == 0); + + have_header = ktls_pending_rx_info(sb, &seqno, &resid); + CTR5(KTR_CXGBE, "%s: tid %d have_header %d seqno %lu resid %zu", + __func__, toep->tid, have_header, seqno, resid); + + /* + * If socket buffer is empty or only contains complete TLS + * records, enable TLS decryption. + */ + if (resid == 0) { + finish_tls_rx_enable(sc, toep, seqno); + return; + } + + /* + * If we have a partial header at the end of the socket + * buffer, just ask for a complete TLS header (the smallest + * number of bytes possible) and check again after that data + * is received. + * + * If we need fewer bytes than the size of a TLS header to + * complete the last record in the socket buffer, punt a bit. + * We can't ask for fewer bytes, so instead ask for the + * remaining bytes plus the next full TLS header. This will + * let us ask for the exact payload needed on the next check + * and get back in sync after that. + */ + if (!have_header) { + printf( + "%s: tid %u requesting %zu bytes for missing %zu header bytes\n", + __func__, toep->tid, sizeof(struct tls_hdr), resid); + resid = sizeof(struct tls_hdr); + } else if (resid < sizeof(struct tls_hdr)) { + resid += sizeof(struct tls_hdr); + printf("%s: tid %u requesting %zu bytes for trailer + header\n", + __func__, toep->tid, resid); + } else + printf("%s: tid %u requesting %zu bytes for remainder\n", + __func__, toep->tid, resid); + + /* + * Set PDU length. This is treating the 'resid' bytes as a + * TLS PDU, so the first 5 bytes are a fake header and the + * rest are the PDU length. + */ + t4_set_tls_tcb_field(toep, W_TCB_TX_PDU_LEN, + V_TCB_TX_PDU_LEN(M_TCB_TX_PDU_LEN), + V_TCB_TX_PDU_LEN(resid - sizeof(struct tls_hdr))); + CTR3(KTR_CXGBE, "%s: tid %d setting TX_PDU_LEN to %zu", __func__, + toep->tid, resid - sizeof(struct tls_hdr)); + + /* Allow the 'resid' bytes to be delivered as CPL_RX_DATA. */ + toep->tls.rx_resid = resid; + t4_clear_rx_quiesce(toep); +} + +void +tls_received_starting_data(struct adapter *sc, struct toepcb *toep, + struct sockbuf *sb, int len) +{ + MPASS(toep->flags & TPF_TLS_STARTING); + KASSERT(len <= toep->tls.rx_resid, + ("%s: received excess bytes %d (waiting for %zu)", __func__, len, + toep->tls.rx_resid)); + toep->tls.rx_resid -= len; + if (toep->tls.rx_resid != 0) + return; + + tls_check_rx_sockbuf(sc, toep, sb); +} + +static int +do_tls_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) +{ + struct adapter *sc = iq->adapter; + const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1); + unsigned int tid = GET_TID(cpl); + struct toepcb *toep; + struct inpcb *inp; + struct socket *so; + struct sockbuf *sb; + + if (cpl->status != CPL_ERR_NONE) + panic("XXX: tcp_rpl failed: %d", cpl->status); + + toep = lookup_tid(sc, tid); + inp = toep->inp; + switch (cpl->cookie) { + case V_WORD(W_TCB_T_FLAGS) | V_COOKIE(CPL_COOKIE_TOM): + INP_WLOCK(inp); + if ((toep->flags & TPF_TLS_STARTING) == 0) + panic("%s: connection is not starting TLS RX\n", + __func__); + + /* Set the ULP mode to ULP_MODE_TLS. */ + toep->params.ulp_mode = ULP_MODE_TLS; + t4_set_tls_tcb_field(toep, W_TCB_ULP_TYPE, + V_TCB_ULP_TYPE(M_TCB_ULP_TYPE), + V_TCB_ULP_TYPE(ULP_MODE_TLS)); + + /* Clear all bits in ULP_RAW except for ENABLE. 
+		t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
+		    V_TCB_ULP_RAW(M_TCB_ULP_RAW),
+		    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));
+
+		/* Clear the entire TLS overlay region: 1023:832. */
+		for (u_int word = 832 / 32; word < 1024 / 32; word += 2)
+			t4_set_tls_tcb_field(toep, word, 0xffffffffffffffff, 0);
+
+		so = inp->inp_socket;
+		sb = &so->so_rcv;
+		SOCKBUF_LOCK(sb);
+		tls_check_rx_sockbuf(sc, toep, sb);
+		SOCKBUF_UNLOCK(sb);
+		INP_WUNLOCK(inp);
+		break;
+	default:
+		panic("XXX: unknown tcb_rpl offset %#x, cookie %#x",
+		    G_WORD(cpl->cookie), G_COOKIE(cpl->cookie));
+	}
+
+	return (0);
+}
+
 void
 t4_tls_mod_load(void)
 {
 
 	t4_register_cpl_handler(CPL_TLS_DATA, do_tls_data);
 	t4_register_cpl_handler(CPL_RX_TLS_CMP, do_rx_tls_cmp);
+	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_tls_tcb_rpl,
+	    CPL_COOKIE_TOM);
 }
 
 void
@@ -1246,6 +1274,7 @@
 
 	t4_register_cpl_handler(CPL_TLS_DATA, NULL);
 	t4_register_cpl_handler(CPL_RX_TLS_CMP, NULL);
+	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_TOM);
 }
 #endif	/* TCP_OFFLOAD */
 #endif	/* KERN_TLS */
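To summarize the request sizing implemented by tls_check_rx_sockbuf()
above, the three cases can be restated as a small pure function.  This is
an illustrative sketch with the 5-byte TLS header size hard-coded, not
code from the patch.

/*
 * Illustrative restatement of the sizing rules: 'resid' is the number
 * of bytes still missing from the last buffered record as reported by
 * ktls_pending_rx_info(); the result is how many more bytes to let
 * through as plain CPL_RX_DATA before re-checking.
 */
#define TLS_HDR_LEN	5	/* wire size of struct tls_hdr */

static size_t
tls_rx_request_len(bool have_header, size_t resid)
{
	if (!have_header) {
		/*
		 * Partial header: the hardware cannot be asked for
		 * fewer bytes than one header, so request 5 even if
		 * only 1 or 2 header bytes are actually missing.
		 */
		return (TLS_HDR_LEN);
	}
	if (resid < TLS_HDR_LEN) {
		/*
		 * e.g. 2 trailer bytes short: request 2 + 5 = 7 so
		 * the next check sees a fresh header and resyncs.
		 */
		return (resid + TLS_HDR_LEN);
	}
	/* e.g. 60 payload bytes missing: request exactly 60. */
	return (resid);
}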
diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h
--- a/sys/dev/cxgbe/tom/t4_tom.h
+++ b/sys/dev/cxgbe/tom/t4_tom.h
@@ -71,12 +71,11 @@
 	TPF_CPL_PENDING	   = (1 << 7),	/* haven't received the last CPL */
 	TPF_SYNQE	   = (1 << 8),	/* synq_entry, not really a toepcb */
 	TPF_SYNQE_EXPANDED = (1 << 9),	/* toepcb ready, tid context updated */
-	TPF_FORCE_CREDITS  = (1 << 10),	/* always send credits */
+	TPF_TLS_STARTING   = (1 << 10),	/* starting TLS receive */
 	TPF_KTLS	   = (1 << 11),	/* send TLS records from KTLS */
 	TPF_INITIALIZED	   = (1 << 12),	/* init_toepcb has been called */
 	TPF_TLS_RECEIVE	   = (1 << 13),	/* should receive TLS records */
-	TPF_TLS_ESTABLISHED = (1 << 14), /* TLS handshake timer initialized */
-	TPF_WAITING_FOR_FINAL = (1<< 15), /* waiting for wakeup on final CPL */
+	TPF_WAITING_FOR_FINAL = (1 << 14), /* waiting for wakeup on final CPL */
 };
 
 enum {
@@ -470,7 +469,6 @@
 void send_flowc_wr(struct toepcb *, struct tcpcb *);
 void send_reset(struct adapter *, struct toepcb *, uint32_t);
 int send_rx_credits(struct adapter *, struct toepcb *, int);
-void send_rx_modulate(struct adapter *, struct toepcb *);
 void make_established(struct toepcb *, uint32_t, uint32_t, uint16_t);
 int t4_close_conn(struct adapter *, struct toepcb *);
 void t4_rcvd(struct toedev *, struct tcpcb *);
@@ -519,12 +517,11 @@
 bool can_tls_offload(struct adapter *);
 void do_rx_data_tls(const struct cpl_rx_data *, struct toepcb *, struct mbuf *);
 void t4_push_ktls(struct adapter *, struct toepcb *, int);
+void tls_received_starting_data(struct adapter *, struct toepcb *,
+    struct sockbuf *, int);
 void t4_tls_mod_load(void);
 void t4_tls_mod_unload(void);
-void tls_detach(struct toepcb *);
-void tls_establish(struct toepcb *);
 void tls_init_toep(struct toepcb *);
-void tls_stop_handshake_timer(struct toepcb *);
 int tls_tx_key(struct toepcb *);
 void tls_uninit_toep(struct toepcb *);
 int tls_alloc_ktls(struct toepcb *, struct ktls_session *, int);
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -394,9 +394,6 @@
 	}
 #endif
 
-	if (ulp_mode(toep) == ULP_MODE_TLS)
-		tls_detach(toep);
-
 	tp->tod = NULL;
 	tp->t_toe = NULL;
 	tp->t_flags &= ~TF_TOE;
@@ -1020,8 +1017,6 @@
 
 	if (ulp_mode(toep) == ULP_MODE_TCPDDP)
 		release_ddp_resources(toep);
-	else if (ulp_mode(toep) == ULP_MODE_TLS)
-		tls_detach(toep);
 	toep->inp = NULL;
 	need_wakeup = (toep->flags & TPF_WAITING_FOR_FINAL) != 0;
 	toep->flags &= ~(TPF_CPL_PENDING | TPF_WAITING_FOR_FINAL);
@@ -1260,26 +1255,6 @@
 	return (htobe64(V_FILTER_TUPLE(ntuple)));
 }
 
-static int
-is_tls_sock(struct socket *so, struct adapter *sc)
-{
-	struct inpcb *inp = sotoinpcb(so);
-	int i, rc;
-
-	/* XXX: Eventually add a SO_WANT_TLS socket option perhaps? */
-	rc = 0;
-	ADAPTER_LOCK(sc);
-	for (i = 0; i < sc->tt.num_tls_rx_ports; i++) {
-		if (inp->inp_lport == htons(sc->tt.tls_rx_ports[i]) ||
-		    inp->inp_fport == htons(sc->tt.tls_rx_ports[i])) {
-			rc = 1;
-			break;
-		}
-	}
-	ADAPTER_UNLOCK(sc);
-	return (rc);
-}
-
 /*
  * Initialize various connection parameters.
 */
@@ -1350,10 +1325,7 @@
 		cp->tx_align = 0;
 
 	/* ULP mode. */
-	if (can_tls_offload(sc) &&
-	    (s->tls > 0 || (s->tls < 0 && is_tls_sock(so, sc))))
-		cp->ulp_mode = ULP_MODE_TLS;
-	else if (s->ddp > 0 ||
+	if (s->ddp > 0 ||
 	    (s->ddp < 0 && sc->tt.ddp && (so_options_get(so) & SO_NO_DDP) == 0))
 		cp->ulp_mode = ULP_MODE_TCPDDP;
 	else
@@ -1362,8 +1334,6 @@
 	/* Rx coalescing. */
 	if (s->rx_coalesce >= 0)
 		cp->rx_coalesce = s->rx_coalesce > 0 ? 1 : 0;
-	else if (cp->ulp_mode == ULP_MODE_TLS)
-		cp->rx_coalesce = 0;
 	else if (tt->rx_coalesce >= 0)
 		cp->rx_coalesce = tt->rx_coalesce > 0 ? 1 : 0;
 	else
diff --git a/sys/dev/glxsb/glxsb_hash.c b/sys/dev/glxsb/glxsb_hash.c
--- a/sys/dev/glxsb/glxsb_hash.c
+++ b/sys/dev/glxsb/glxsb_hash.c
@@ -70,6 +70,9 @@
 	union authctx ctx;
 	int error;
 
+	if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+		return (0);
+
 	axf = ses->ses_axf;
 	bcopy(ses->ses_ictx, &ctx, axf->ctxsize);
 	error = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
diff --git a/sys/dev/hifn/hifn7751.c b/sys/dev/hifn/hifn7751.c
--- a/sys/dev/hifn/hifn7751.c
+++ b/sys/dev/hifn/hifn7751.c
@@ -2650,7 +2650,7 @@
 
 	hifnstats.hst_obytes += cmd->dst_mapsize;
 
-	if (macbuf != NULL) {
+	if (macbuf != NULL && (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) == 0) {
 		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
 			crypto_copydata(crp, crp->crp_digest_start,
 			    cmd->session->hs_mlen, macbuf2);
diff --git a/sys/dev/qat/qat.c b/sys/dev/qat/qat.c
--- a/sys/dev/qat/qat.c
+++ b/sys/dev/qat/qat.c
@@ -1823,7 +1823,8 @@
 	qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_OBUF);
 
 	error = 0;
-	if ((auth_sz = qs->qs_auth_mlen) != 0) {
+	if ((auth_sz = qs->qs_auth_mlen) != 0 &&
+	    (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) == 0) {
 		if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
 			crypto_copydata(crp, crp->crp_digest_start,
 			    auth_sz, icv);
diff --git a/sys/dev/safe/safe.c b/sys/dev/safe/safe.c
--- a/sys/dev/safe/safe.c
+++ b/sys/dev/safe/safe.c
@@ -1317,7 +1317,9 @@
 			    bswap32(re->re_sastate.sa_saved_indigest[2]);
 		}
 
-		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+		if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+			/* NOP */;
+		else if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
 			crypto_copydata(crp, crp->crp_digest_start,
 			    ses->ses_mlen, hash);
 			if (timingsafe_bcmp(re->re_sastate.sa_saved_indigest,
diff --git a/sys/dev/safexcel/safexcel.c b/sys/dev/safexcel/safexcel.c
--- a/sys/dev/safexcel/safexcel.c
+++ b/sys/dev/safexcel/safexcel.c
@@ -2163,7 +2163,7 @@
 		safexcel_append_segs(segs, nseg, ring->res_data,
 		    crp->crp_payload_start, crp->crp_payload_length);
 	}
-	if (sess->digestlen > 0) {
+	if (sess->digestlen > 0 && (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) == 0) {
 		if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
 			safexcel_append_segs(segs, nseg, ring->cmd_data,
 			    crp->crp_digest_start, sess->digestlen);
diff --git a/sys/dev/sec/sec.c b/sys/dev/sec/sec.c
--- a/sys/dev/sec/sec.c
+++ b/sys/dev/sec/sec.c
@@ -570,7 +570,8 @@
 		crp->crp_etype = desc->sd_error;
 		if (crp->crp_etype == 0) {
 			ses = crypto_get_driver_session(crp->crp_session);
-			if (ses->ss_mlen != 0) {
+			if (ses->ss_mlen != 0 &&
+			    (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) == 0) {
 				if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
 					crypto_copydata(crp,
 					    crp->crp_digest_start,
diff --git a/sys/kern/uipc_ktls.c b/sys/kern/uipc_ktls.c
--- a/sys/kern/uipc_ktls.c
+++ b/sys/kern/uipc_ktls.c
@@ -299,7 +299,7 @@
 static void ktls_cleanup(struct ktls_session *tls);
 #if defined(INET) || defined(INET6)
-static void ktls_reset_send_tag(void *context, int pending);
+static void ktls_reset_send_receive_tag(void *context, int pending);
 #endif
 static void ktls_work_thread(void *ctx);
 static void ktls_alloc_thread(void *ctx);
@@ -504,7 +504,7 @@
 #if defined(INET) || defined(INET6)
 static int
 ktls_create_session(struct socket *so, struct tls_enable *en,
-    struct ktls_session **tlsp)
+    struct ktls_session **tlsp, int direction)
 {
 	struct ktls_session *tls;
 	int error;
@@ -609,9 +609,10 @@
 
 	counter_u64_add(ktls_offload_active, 1);
 
 	refcount_init(&tls->refcount, 1);
-	TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_tag, tls);
+	TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_receive_tag, tls);
 
 	tls->wq_index = ktls_get_cpu(so);
+	tls->direction = direction;
 
 	tls->params.cipher_algorithm = en->cipher_algorithm;
 	tls->params.auth_algorithm = en->auth_algorithm;
@@ -744,11 +745,12 @@
 
 	counter_u64_add(ktls_offload_active, 1);
 
 	refcount_init(&tls_new->refcount, 1);
-	TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_send_tag, tls_new);
+	TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_send_receive_tag, tls_new);
 
 	/* Copy fields from existing session. */
 	tls_new->params = tls->params;
 	tls_new->wq_index = tls->wq_index;
+	tls_new->direction = tls->direction;
 
 	/* Deep copy keys. */
 	if (tls_new->params.auth_key != NULL) {
@@ -785,7 +787,6 @@
 			counter_u64_add(ktls_sw_chacha20, -1);
 			break;
 		}
-		ktls_ocf_free(tls);
 		break;
 	case TCP_TLS_MODE_IFNET:
 		switch (tls->params.cipher_algorithm) {
@@ -799,8 +800,8 @@
 			counter_u64_add(ktls_ifnet_chacha20, -1);
 			break;
 		}
-		if (tls->snd_tag != NULL)
-			m_snd_tag_rele(tls->snd_tag);
+		if (tls->snd_rcv_tag != NULL)
+			m_snd_tag_rele(tls->snd_rcv_tag);
 		break;
 #ifdef TCP_OFFLOAD
 	case TCP_TLS_MODE_TOE:
@@ -818,6 +819,8 @@
 		break;
 #endif
 	}
+	if (tls->ocf_session != NULL)
+		ktls_ocf_free(tls);
 	if (tls->params.auth_key != NULL) {
 		zfree(tls->params.auth_key, M_KTLS);
 		tls->params.auth_key = NULL;
@@ -980,39 +983,143 @@
 	return (error);
 }
 
+/*
+ * Common code for allocating a TLS receive tag for doing HW
+ * decryption of TLS data.
+ *
+ * This function allocates a new TLS receive tag on whatever interface
+ * the connection is currently routed over.
+ */ static int -ktls_try_ifnet(struct socket *so, struct ktls_session *tls, bool force) +ktls_alloc_rcv_tag(struct inpcb *inp, struct ktls_session *tls, bool force, + struct m_snd_tag **mstp) { - struct m_snd_tag *mst; + union if_snd_tag_alloc_params params; + struct ifnet *ifp; + struct nhop_object *nh; + struct tcpcb *tp; int error; - error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst); - if (error == 0) { - tls->mode = TCP_TLS_MODE_IFNET; - tls->snd_tag = mst; - switch (tls->params.cipher_algorithm) { - case CRYPTO_AES_CBC: - counter_u64_add(ktls_ifnet_cbc, 1); - break; - case CRYPTO_AES_NIST_GCM_16: - counter_u64_add(ktls_ifnet_gcm, 1); - break; - case CRYPTO_CHACHA20_POLY1305: - counter_u64_add(ktls_ifnet_chacha20, 1); - break; + INP_RLOCK(inp); + if (inp->inp_flags2 & INP_FREED) { + INP_RUNLOCK(inp); + return (ECONNRESET); + } + if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { + INP_RUNLOCK(inp); + return (ECONNRESET); + } + if (inp->inp_socket == NULL) { + INP_RUNLOCK(inp); + return (ECONNRESET); + } + tp = intotcpcb(inp); + + /* + * Check administrative controls on ifnet TLS to determine if + * ifnet TLS should be denied. + * + * - Always permit 'force' requests. + * - ktls_ifnet_permitted == 0: always deny. + */ + if (!force && ktls_ifnet_permitted == 0) { + INP_RUNLOCK(inp); + return (ENXIO); + } + + /* + * XXX: Use the cached route in the inpcb to find the + * interface. This should perhaps instead use + * rtalloc1_fib(dst, 0, 0, fibnum). Since KTLS is only + * enabled after a connection has completed key negotiation in + * userland, the cached route will be present in practice. + */ + nh = inp->inp_route.ro_nh; + if (nh == NULL) { + INP_RUNLOCK(inp); + return (ENXIO); + } + ifp = nh->nh_ifp; + if_ref(ifp); + + params.hdr.type = IF_SND_TAG_TYPE_TLS_RX; + params.hdr.flowid = inp->inp_flowid; + params.hdr.flowtype = inp->inp_flowtype; + params.hdr.numa_domain = inp->inp_numa_domain; + params.tls_rx.inp = inp; + params.tls_rx.tls = tls; + params.tls_rx.next_tls_hdr_tcp_sn = + tp->rcv_nxt - sbavail(&inp->inp_socket->so_rcv); + + INP_RUNLOCK(inp); + + if ((ifp->if_capenable & IFCAP_MEXTPG) == 0) { + error = EOPNOTSUPP; + goto out; + } + + /* XXX reusing TXTLS flags */ + if (inp->inp_vflag & INP_IPV6) { + if ((ifp->if_capenable & IFCAP_TXTLS6) == 0) { + error = EOPNOTSUPP; + goto out; + } + } else { + if ((ifp->if_capenable & IFCAP_TXTLS4) == 0) { + error = EOPNOTSUPP; + goto out; } } + error = m_snd_tag_alloc(ifp, ¶ms, mstp); +out: + if_rele(ifp); return (error); } static int -ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction) +ktls_try_ifnet(struct socket *so, struct ktls_session *tls, int direction, bool force) { + struct m_snd_tag *mst; int error; - error = ktls_ocf_try(so, tls, direction); - if (error) - return (error); + switch (direction) { + case KTLS_TX: + error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst); + if (__predict_false(error != 0)) + goto done; + break; + case KTLS_RX: + error = ktls_alloc_rcv_tag(so->so_pcb, tls, force, &mst); + if (__predict_false(error != 0)) + goto done; + break; + default: + return (EINVAL); + } + + tls->mode = TCP_TLS_MODE_IFNET; + tls->snd_rcv_tag = mst; + + switch (tls->params.cipher_algorithm) { + case CRYPTO_AES_CBC: + counter_u64_add(ktls_ifnet_cbc, 1); + break; + case CRYPTO_AES_NIST_GCM_16: + counter_u64_add(ktls_ifnet_gcm, 1); + break; + case CRYPTO_CHACHA20_POLY1305: + counter_u64_add(ktls_ifnet_chacha20, 1); + break; + default: + break; + } +done: + return (error); +} + +static void 
@@ -1083,6 +1201,69 @@
 	    sb->sb_ccc));
 }
 
+/*
+ * Return information about the pending TLS data in a socket
+ * buffer.  On return, 'seqno' is set to the sequence number
+ * of the next TLS record to be received, and 'resid' is set to
+ * the number of bytes still needed for the last pending
+ * record.  The function returns 'false' if the last pending
+ * record contains a partial TLS header.  In that case, 'resid'
+ * is the number of bytes needed to complete the TLS header.
+ */
+bool
+ktls_pending_rx_info(struct sockbuf *sb, uint64_t *seqnop, size_t *residp)
+{
+	struct tls_record_layer hdr;
+	struct mbuf *m;
+	uint64_t seqno;
+	size_t resid;
+	u_int offset, record_len;
+
+	SOCKBUF_LOCK_ASSERT(sb);
+	MPASS(sb->sb_flags & SB_TLS_RX);
+	seqno = sb->sb_tls_seqno;
+	resid = sb->sb_tlscc;
+	m = sb->sb_mtls;
+	offset = 0;
+
+	if (resid == 0) {
+		*seqnop = seqno;
+		*residp = 0;
+		return (true);
+	}
+
+	for (;;) {
+		seqno++;
+
+		if (resid < sizeof(hdr)) {
+			*seqnop = seqno;
+			*residp = sizeof(hdr) - resid;
+			return (false);
+		}
+
+		m_copydata(m, offset, sizeof(hdr), (void *)&hdr);
+
+		record_len = sizeof(hdr) + ntohs(hdr.tls_length);
+		if (resid <= record_len) {
+			*seqnop = seqno;
+			*residp = record_len - resid;
+			return (true);
+		}
+		resid -= record_len;
+
+		while (record_len != 0) {
+			if (m->m_len - offset > record_len) {
+				offset += record_len;
+				break;
+			}
+
+			record_len -= (m->m_len - offset);
+			offset = 0;
+			m = m->m_next;
+		}
+	}
+}
+
 int
 ktls_enable_rx(struct socket *so, struct tls_enable *en)
 {
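Worked examples for the contract described above, assuming the usual
5-byte TLS record header and sb_tls_seqno == 7 on entry.  The values were
derived by tracing the loop and are not part of the patch:

/*
 * sb_mtls contents                           ret     *seqnop  *residp
 * --------------------------------------------------------------------
 * empty                                      true       7        0
 * one complete 21-byte record                true       8        0
 * 21-byte record + 3 bytes of next header    false      9        2
 * 21-byte record + next header claiming
 *   100 payload bytes, 40 of them present    true       9       60
 */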
@@ -1118,16 +1299,11 @@
 	    en->tls_vminor == TLS_MINOR_VER_THREE)
 		return (ENOTSUP);
 
-	error = ktls_create_session(so, en, &tls);
+	error = ktls_create_session(so, en, &tls, KTLS_RX);
 	if (error)
 		return (error);
 
-#ifdef TCP_OFFLOAD
-	error = ktls_try_toe(so, tls, KTLS_RX);
-	if (error)
-#endif
-		error = ktls_try_sw(so, tls, KTLS_RX);
-
+	error = ktls_ocf_try(so, tls, KTLS_RX);
 	if (error) {
 		ktls_cleanup(tls);
 		return (error);
@@ -1140,12 +1316,19 @@
 	so->so_rcv.sb_flags |= SB_TLS_RX;
 
 	/* Mark existing data as not ready until it can be decrypted. */
-	if (tls->mode != TCP_TLS_MODE_TOE) {
-		sb_mark_notready(&so->so_rcv);
-		ktls_check_rx(&so->so_rcv);
-	}
+	sb_mark_notready(&so->so_rcv);
+	ktls_check_rx(&so->so_rcv);
 	SOCKBUF_UNLOCK(&so->so_rcv);
 
+	/* Prefer TOE -> ifnet TLS -> software TLS. */
+#ifdef TCP_OFFLOAD
+	error = ktls_try_toe(so, tls, KTLS_RX);
+	if (error)
+#endif
+		error = ktls_try_ifnet(so, tls, KTLS_RX, false);
+	if (error)
+		ktls_use_sw(tls);
+
 	counter_u64_add(ktls_offload_total, 1);
 
 	return (0);
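ktls_enable_rx() above is reached via the TCP_RXTLS_ENABLE socket option.
A hedged userspace sketch follows; the header set and the struct
tls_enable field names reflect my reading of sys/sys/ktls.h and
sys/netinet/tcp.h (the cipher constants are installed for userland as
<crypto/cryptodev.h>), and the key material is assumed to have been
negotiated by a userland handshake.

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ktls.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <crypto/cryptodev.h>
#include <string.h>

/*
 * Illustrative only: enable kernel TLS 1.2 AES-GCM receive offload on
 * a connected socket 's'.
 */
static int
enable_ktls_rx(int s, const uint8_t *key, int keylen,
    const uint8_t *iv, int ivlen)
{
	struct tls_enable en;

	memset(&en, 0, sizeof(en));
	en.cipher_algorithm = CRYPTO_AES_NIST_GCM_16;
	en.cipher_key = key;
	en.cipher_key_len = keylen;
	en.iv = iv;
	en.iv_len = ivlen;
	en.tls_vmajor = TLS_MAJOR_VER_ONE;
	en.tls_vminor = TLS_MINOR_VER_TWO;
	return (setsockopt(s, IPPROTO_TCP, TCP_RXTLS_ENABLE, &en,
	    sizeof(en)));
}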
@@ -1186,7 +1369,7 @@
 	if (mb_use_ext_pgs == 0)
 		return (ENXIO);
 
-	error = ktls_create_session(so, en, &tls);
+	error = ktls_create_session(so, en, &tls, KTLS_TX);
 	if (error)
 		return (error);
 
@@ -1195,7 +1378,7 @@
 	error = ktls_try_toe(so, tls, KTLS_TX);
 	if (error)
 #endif
-		error = ktls_try_ifnet(so, tls, false);
+		error = ktls_try_ifnet(so, tls, KTLS_TX, false);
 	if (error)
 		error = ktls_try_sw(so, tls, KTLS_TX);
 
@@ -1312,7 +1495,7 @@
 	tls_new = ktls_clone_session(tls);
 
 	if (mode == TCP_TLS_MODE_IFNET)
-		error = ktls_try_ifnet(so, tls_new, true);
+		error = ktls_try_ifnet(so, tls_new, KTLS_TX, true);
 	else
 		error = ktls_try_sw(so, tls_new, KTLS_TX);
 	if (error) {
@@ -1371,19 +1554,21 @@
 }
 
 /*
- * Try to allocate a new TLS send tag.  This task is scheduled when
- * ip_output detects a route change while trying to transmit a packet
- * holding a TLS record.  If a new tag is allocated, replace the tag
- * in the TLS session.  Subsequent packets on the connection will use
- * the new tag.  If a new tag cannot be allocated, drop the
- * connection.
+ * ktls_reset_send_receive_tag - try to allocate a new TLS send or receive tag.
+ *
+ * This task is scheduled when ip_output detects a route change while
+ * trying to transmit a packet holding a TLS record.  If a new tag is
+ * allocated, replace the tag in the TLS session.  Subsequent packets
+ * on the connection will use the new tag.  If a new tag cannot be
+ * allocated, drop the connection.
 */
 static void
-ktls_reset_send_tag(void *context, int pending)
+ktls_reset_send_receive_tag(void *context, int pending)
 {
 	struct epoch_tracker et;
 	struct ktls_session *tls;
-	struct m_snd_tag *old, *new;
+	struct m_snd_tag *snd_rcv_old;
+	struct m_snd_tag *snd_rcv_new;
 	struct inpcb *inp;
 	struct tcpcb *tp;
 	int error;
@@ -1399,72 +1584,81 @@
 	 * an ifp mismatch and drop packets until a new tag is
 	 * allocated.
 	 *
-	 * Write-lock the INP when changing tls->snd_tag since
+	 * Write-lock the INP when changing tls->snd_rcv_tag since
 	 * ip[6]_output_send() holds a read-lock when reading the
 	 * pointer.
 	 */
 	INP_WLOCK(inp);
-	old = tls->snd_tag;
-	tls->snd_tag = NULL;
+	snd_rcv_old = tls->snd_rcv_tag;
+	tls->snd_rcv_tag = NULL;
 	INP_WUNLOCK(inp);
-	if (old != NULL)
-		m_snd_tag_rele(old);
 
-	error = ktls_alloc_snd_tag(inp, tls, true, &new);
+	if (snd_rcv_old != NULL)
+		m_snd_tag_rele(snd_rcv_old);
 
-	if (error == 0) {
-		INP_WLOCK(inp);
-		tls->snd_tag = new;
-		mtx_pool_lock(mtxpool_sleep, tls);
-		tls->reset_pending = false;
-		mtx_pool_unlock(mtxpool_sleep, tls);
-		if (!in_pcbrele_wlocked(inp))
-			INP_WUNLOCK(inp);
+	switch (tls->direction) {
+	case KTLS_TX:
+		error = ktls_alloc_snd_tag(inp, tls, true, &snd_rcv_new);
+		break;
+	case KTLS_RX:
+		error = ktls_alloc_rcv_tag(inp, tls, true, &snd_rcv_new);
+		break;
+	default:
+		goto drop_connection;
+	}
+	if (error != 0)
+		goto drop_connection;
 
-		counter_u64_add(ktls_ifnet_reset, 1);
+	INP_WLOCK(inp);
+	tls->snd_rcv_tag = snd_rcv_new;
 
-		/*
-		 * XXX: Should we kick tcp_output explicitly now that
-		 * the send tag is fixed or just rely on timers?
-		 */
-	} else {
-		NET_EPOCH_ENTER(et);
-		INP_WLOCK(inp);
-		if (!in_pcbrele_wlocked(inp)) {
-			if (!(inp->inp_flags & INP_TIMEWAIT) &&
-			    !(inp->inp_flags & INP_DROPPED)) {
-				tp = intotcpcb(inp);
-				CURVNET_SET(tp->t_vnet);
-				tp = tcp_drop(tp, ECONNABORTED);
-				CURVNET_RESTORE();
-				if (tp != NULL)
-					INP_WUNLOCK(inp);
-				counter_u64_add(ktls_ifnet_reset_dropped, 1);
-			} else
-				INP_WUNLOCK(inp);
-		}
-		NET_EPOCH_EXIT(et);
+	mtx_pool_lock(mtxpool_sleep, tls);
+	tls->reset_pending = false;
+	mtx_pool_unlock(mtxpool_sleep, tls);
 
-		counter_u64_add(ktls_ifnet_reset_failed, 1);
+	if (!in_pcbrele_wlocked(inp))
+		INP_WUNLOCK(inp);
 
-		/*
-		 * Leave reset_pending true to avoid future tasks while
-		 * the socket goes away.
-		 */
-	}
+	counter_u64_add(ktls_ifnet_reset, 1);
 
 	ktls_free(tls);
-}
 
-int
-ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls)
-{
+	/*
+	 * XXX: Should we kick tcp_output explicitly now that
+	 * the send tag is fixed or just rely on timers?
+	 */
+	return;
 
-	if (inp == NULL)
-		return (ENOBUFS);
+drop_connection:
+	NET_EPOCH_ENTER(et);
+	INP_WLOCK(inp);
+	if (!in_pcbrele_wlocked(inp)) {
+		if (!(inp->inp_flags & INP_TIMEWAIT) &&
+		    !(inp->inp_flags & INP_DROPPED)) {
+			tp = intotcpcb(inp);
+			CURVNET_SET(tp->t_vnet);
+			tp = tcp_drop(tp, ECONNABORTED);
+			CURVNET_RESTORE();
+			if (tp != NULL)
+				INP_WUNLOCK(inp);
+			counter_u64_add(ktls_ifnet_reset_dropped, 1);
+		} else
+			INP_WUNLOCK(inp);
+	}
+	NET_EPOCH_EXIT(et);
 
-	INP_LOCK_ASSERT(inp);
+	counter_u64_add(ktls_ifnet_reset_failed, 1);
 
+	/*
+	 * Leave reset_pending true to avoid future tasks while
+	 * the socket goes away.
+	 */
+	ktls_free(tls);
+}
+
+static void
+ktls_output_eagain_tls(struct inpcb *inp, struct ktls_session *tls)
+{
 	/*
 	 * See if we should schedule a task to update the send tag for
 	 * this session.
@@ -1478,6 +1672,30 @@
 		taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task);
 	}
 	mtx_pool_unlock(mtxpool_sleep, tls);
+}
+
+int
+ktls_output_eagain(struct inpcb *inp)
+{
+	struct socket *so;
+	struct ktls_session *tls;
+
+	if (__predict_false(inp == NULL))
+		goto done;
+	INP_LOCK_ASSERT(inp);
+
+	so = inp->inp_socket;
+	if (__predict_false(so == NULL))
+		goto done;
+
+	tls = so->so_rcv.sb_tls_info;
+	if (__predict_true(tls != NULL))
+		ktls_output_eagain_tls(inp, tls);
+
+	tls = so->so_snd.sb_tls_info;
+	if (__predict_true(tls != NULL))
+		ktls_output_eagain_tls(inp, tls);
+done:
 	return (ENOBUFS);
 }
@@ -1496,7 +1714,7 @@
 
 	MPASS(tls->mode == TCP_TLS_MODE_IFNET);
 
-	if (tls->snd_tag == NULL) {
+	if (tls->snd_rcv_tag == NULL) {
 		/*
 		 * Resetting send tag, ignore this change.  The
 		 * pending reset may or may not see this updated rate
@@ -1506,10 +1724,11 @@
 		return (0);
 	}
 
-	MPASS(tls->snd_tag != NULL);
-	MPASS(tls->snd_tag->sw->type == IF_SND_TAG_TYPE_TLS_RATE_LIMIT);
+	mst = tls->snd_rcv_tag;
+
+	MPASS(mst != NULL);
+	MPASS(mst->sw->type == IF_SND_TAG_TYPE_TLS_RATE_LIMIT);
 
-	mst = tls->snd_tag;
 	return (mst->sw->snd_tag_modify(mst, &params));
 }
 #endif
@@ -1792,7 +2011,7 @@
 			return (NULL);
 		}
 	}
-	n->m_flags |= M_NOTREADY;
+	n->m_flags |= M_NOTREADY | (m->m_flags & M_DECRYPTED);
 
 	/* Store remainder in 'n'. */
 	n->m_len = m->m_len - remain;
@@ -1830,6 +2049,81 @@
 	return (top);
 }
 
+/*
+ * Check if a mbuf chain is fully decrypted at the given offset and
+ * length.  Returns -1 if all data is decrypted, 0 if there is a mix
+ * of encrypted and decrypted data, and 1 if all data is encrypted.
+ */
+int
+ktls_mbuf_crypto_state(struct mbuf *mb, int offset, int len)
+{
+	int m_flags_ored = 0;
+	int m_flags_anded = -1;
+
+	for (; mb != NULL; mb = mb->m_next) {
+		if (offset < mb->m_len)
+			break;
+		offset -= mb->m_len;
+	}
+	offset += len;
+
+	for (; mb != NULL; mb = mb->m_next) {
+		m_flags_ored |= mb->m_flags;
+		m_flags_anded &= mb->m_flags;
+
+		if (offset <= mb->m_len)
+			break;
+		offset -= mb->m_len;
+	}
+	MPASS(mb != NULL || offset == 0);
+
+	if ((m_flags_ored ^ m_flags_anded) & M_DECRYPTED)
+		return (0);	/* mixed */
+	else
+		return ((m_flags_ored & M_DECRYPTED) ? -1 : 1);
+}
+
+/*
+ * ktls_resync_ifnet - get TLS RX back on track after packet loss
+ */
+static int
+ktls_resync_ifnet(struct socket *so)
+{
+	union if_snd_tag_modify_params params;
+	struct m_snd_tag *mst;
+	struct inpcb *inp;
+	struct tcpcb *tp;
+
+	mst = so->so_rcv.sb_tls_info->snd_rcv_tag;
+	if (__predict_false(mst == NULL))
+		return (EINVAL);
+
+	inp = sotoinpcb(so);
+	if (__predict_false(inp == NULL))
+		return (EINVAL);
+
+	INP_RLOCK(inp);
+	if (inp->inp_flags2 & INP_FREED) {
+		INP_RUNLOCK(inp);
+		return (ECONNRESET);
+	}
+	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+		INP_RUNLOCK(inp);
+		return (ECONNRESET);
+	}
+
+	tp = intotcpcb(inp);
+	MPASS(tp != NULL);
+
+	/* Get the TCP sequence number of the next TLS header. */
+	params.tls_rx.next_tls_hdr_tcp_sn =
+	    tp->rcv_nxt - sbavail(&so->so_rcv);
+	INP_RUNLOCK(inp);
+
+	MPASS(mst->sw->type == IF_SND_TAG_TYPE_TLS_RX);
+	return (mst->sw->snd_tag_modify(mst, &params));
+}
+
 static void
 ktls_decrypt(struct socket *so)
 {
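The tri-state result of ktls_mbuf_crypto_state() drives the switch added
to ktls_decrypt() in the next hunk.  A compact sketch of the expected
branching follows; it is illustrative only, with empty branches standing
in for the real handling:

/*
 * Sketch: dispatch on the crypto state of one TLS record held in the
 * mbuf chain 'record' of 'tls_len' bytes.
 */
static void
handle_record_state(struct mbuf *record, int tls_len)
{
	switch (ktls_mbuf_crypto_state(record, 0, tls_len)) {
	case -1:
		/* Fully M_DECRYPTED: the NIC already decrypted it. */
		break;
	case 1:
		/* Fully encrypted: plain software decrypt and verify. */
		break;
	case 0:
		/*
		 * Mixed (e.g. a route change mid-record).  The real
		 * code first runs sw_decrypt() over a copy with
		 * verify_digest false, which for a CTR-style cipher
		 * re-encrypts the plaintext spans, copies those spans
		 * back over the original chain, and only then decrypts
		 * and verifies the whole record.
		 */
		break;
	}
}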
@@ -1902,7 +2196,61 @@
 		SBCHECK(sb);
 		SOCKBUF_UNLOCK(sb);
 
-		error = tls->sw_decrypt(tls, hdr, data, seqno, &trail_len);
+		switch (ktls_mbuf_crypto_state(data, 0, tls_len)) {
+		struct mbuf *m0;
+		struct mbuf *m1;
+		int m0_off;
+		int m1_off;
+		int off;
+
+		case 0:
+			m0 = data;
+			m1 = m_copypacket(data, M_WAITOK);
+
+			/* Perform XOR of crypto sequence. */
+			error = tls->sw_decrypt(tls, hdr, m1, seqno, &trail_len, false);
+			if (__predict_false(error != 0)) {
+				m_freem(m1);
+				break;
+			}
+
+			/* Reconstruct encrypted mbuf data. */
+			for (off = m0_off = m1_off = 0; off != tls_len; ) {
+				int delta = MIN(m0->m_len - m0_off, m1->m_len - m1_off);
+				int maxlen = tls_len - off - tls->params.tls_tlen;
+
+				if (delta > maxlen)
+					delta = maxlen;
+
+				/* Copy encrypted data back over the decrypted data. */
+				if (m0->m_flags & M_DECRYPTED)
+					m_copydata(m1, m1_off, delta, mtod(m0, caddr_t) + m0_off);
+
+				off += delta;
+				m0_off += delta;
+				m1_off += delta;
+
+				if (m0_off == m0->m_len) {
+					m0 = m0->m_next;
+					m0_off = 0;
+				}
+
+				if (m1_off == m1->m_len) {
+					m1 = m_free(m1);
+					m1_off = 0;
+				}
+			}
+
+			m_freem(m1);
+
+			/* FALLTHROUGH */
+		case 1:
+			error = tls->sw_decrypt(tls, hdr, data, seqno, &trail_len, true);
+			break;
+		default:
+			error = 0;
+			break;
+		}
+
 		if (error) {
 			counter_u64_add(ktls_offload_failed_crypto, 1);
@@ -1977,17 +2325,17 @@
 			data = m_free(data);
 		}
 
-		/* Trim trailer and clear M_NOTREADY. */
+		/* Trim trailer and clear M_NOTREADY and M_DECRYPTED. */
 		remain = be16toh(tgr.tls_length);
 		m = data;
 		for (m = data; remain > m->m_len; m = m->m_next) {
-			m->m_flags &= ~M_NOTREADY;
+			m->m_flags &= ~(M_NOTREADY | M_DECRYPTED);
 			remain -= m->m_len;
 		}
 		m->m_len = remain;
 		m_freem(m->m_next);
 		m->m_next = NULL;
-		m->m_flags &= ~M_NOTREADY;
+		m->m_flags &= ~(M_NOTREADY | M_DECRYPTED);
 
 		/* Set EOR on the final mbuf. */
 		m->m_flags |= M_EOR;
@@ -2003,6 +2351,7 @@
 
 		sorwakeup_locked(so);
 
+		ktls_resync_ifnet(so);
 deref:
 		SOCKBUF_UNLOCK_ASSERT(sb);
diff --git a/sys/mips/cavium/cryptocteon/cryptocteon.c b/sys/mips/cavium/cryptocteon/cryptocteon.c
--- a/sys/mips/cavium/cryptocteon/cryptocteon.c
+++ b/sys/mips/cavium/cryptocteon/cryptocteon.c
@@ -387,7 +387,8 @@
 	(*od->octo_decrypt)(od, od->octo_iov, iovcnt, iovlen,
 	    auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
 
-	if (csp->csp_auth_alg != 0) {
+	if (csp->csp_auth_alg != 0 &&
+	    (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) == 0) {
 		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
 			crypto_copydata(crp, crp->crp_digest_start,
 			    od->octo_mlen, icv2);
diff --git a/sys/mips/nlm/dev/sec/nlmsec.c b/sys/mips/nlm/dev/sec/nlmsec.c
--- a/sys/mips/nlm/dev/sec/nlmsec.c
+++ b/sys/mips/nlm/dev/sec/nlmsec.c
@@ -284,7 +284,8 @@
 			sc->sc_needwakeup &= ~CRYPTO_SYMQ;
 		}
 	}
-	if (cmd->hash_dst_len != 0) {
+	if (cmd->hash_dst_len != 0 &&
+	    (cmd->crp->crp_op & CRYPTO_OP_SKIP_DIGEST) == 0) {
 		if (cmd->crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
 			crypto_copydata(cmd->crp, cmd->crp->crp_digest_start,
 			    cmd->hash_dst_len, hash);
diff --git a/sys/net/if_var.h b/sys/net/if_var.h
--- a/sys/net/if_var.h
+++ b/sys/net/if_var.h
@@ -192,7 +192,8 @@
 #define	IF_SND_TAG_TYPE_UNLIMITED	1
 #define	IF_SND_TAG_TYPE_TLS		2
 #define	IF_SND_TAG_TYPE_TLS_RATE_LIMIT	3
-#define	IF_SND_TAG_TYPE_MAX		4
+#define	IF_SND_TAG_TYPE_TLS_RX		4
+#define	IF_SND_TAG_TYPE_MAX		5
 
 struct if_snd_tag_alloc_header {
 	uint32_t type;		/* send tag type, see IF_SND_TAG_XXX */
@@ -214,6 +215,15 @@
 	const struct ktls_session *tls;
 };
 
+struct if_snd_tag_alloc_tls_rx {
+	struct if_snd_tag_alloc_header hdr;
+	struct inpcb *inp;
+	const struct ktls_session *tls;
+
+	/* TCP sequence number in host endian format */
+	uint32_t next_tls_hdr_tcp_sn;
+};
+
 struct if_snd_tag_alloc_tls_rate_limit {
 	struct if_snd_tag_alloc_header hdr;
 	struct inpcb *inp;
@@ -229,11 +239,17 @@
 	uint32_t flags;		/* M_NOWAIT or M_WAITOK */
 };
 
+struct if_snd_tag_modify_tls_rx {
+	/* TCP sequence number in host endian format */
+	uint32_t next_tls_hdr_tcp_sn;
+};
+
 union if_snd_tag_alloc_params {
 	struct if_snd_tag_alloc_header hdr;
 	struct if_snd_tag_alloc_rate_limit rate_limit;
 	struct if_snd_tag_alloc_rate_limit unlimited;
 	struct if_snd_tag_alloc_tls tls;
+	struct if_snd_tag_alloc_tls_rx tls_rx;
 	struct if_snd_tag_alloc_tls_rate_limit tls_rate_limit;
 };
 
@@ -241,6 +257,7 @@
 	struct if_snd_tag_rate_limit_params rate_limit;
 	struct if_snd_tag_rate_limit_params unlimited;
 	struct if_snd_tag_rate_limit_params tls_rate_limit;
+	struct if_snd_tag_modify_tls_rx tls_rx;
 };
 
 union if_snd_tag_query_params {
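A NIC driver opting into TLS RX offload would accept
IF_SND_TAG_TYPE_TLS_RX in its tag allocation entry point and implement
snd_tag_modify for the resync path.  A minimal hedged sketch of that glue
follows; every drv_-prefixed symbol is hypothetical, while the parameter
structures match the sys/net/if_var.h additions above.

/*
 * Hypothetical driver glue for TLS RX tags.  drv_program_tls_rx() and
 * drv_resync_tls_rx() are placeholders for hardware-specific work.
 */
static int
drv_tls_rx_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	const struct if_snd_tag_alloc_tls_rx *tls_rx = &params->tls_rx;

	/* Program session keys and the first TLS header's TCP SN. */
	return (drv_program_tls_rx(ifp, tls_rx->tls,
	    tls_rx->next_tls_hdr_tcp_sn, ppmt));
}

static int
drv_tls_rx_modify(struct m_snd_tag *mst, union if_snd_tag_modify_params *params)
{
	/* Re-seed record parsing after loss or reordering. */
	return (drv_resync_tls_rx(mst, params->tls_rx.next_tls_hdr_tcp_sn));
}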
diff --git a/sys/netinet/ip_output.c b/sys/netinet/ip_output.c
--- a/sys/netinet/ip_output.c
+++ b/sys/netinet/ip_output.c
@@ -231,7 +231,7 @@
 	 */
 	if (m->m_next != NULL && mbuf_has_tls_session(m->m_next)) {
 		tls = ktls_hold(m->m_next->m_epg_tls);
-		mst = tls->snd_tag;
+		mst = tls->snd_rcv_tag;
 
 		/*
 		 * If a TLS session doesn't have a valid tag, it must
@@ -279,7 +279,7 @@
 #ifdef KERN_TLS
 	if (tls != NULL) {
 		if (error == EAGAIN)
-			error = ktls_output_eagain(inp, tls);
+			error = ktls_output_eagain(inp);
 		ktls_free(tls);
 	}
 #endif
diff --git a/sys/netinet/tcp_ratelimit.c b/sys/netinet/tcp_ratelimit.c
--- a/sys/netinet/tcp_ratelimit.c
+++ b/sys/netinet/tcp_ratelimit.c
@@ -1354,14 +1354,14 @@
 	if (rte)
 		rl_increment_using(rte);
 #ifdef KERN_TLS
-	if (rte != NULL && tls != NULL && tls->snd_tag != NULL) {
+	if (rte != NULL && tls != NULL && tls->snd_rcv_tag != NULL) {
 		/*
 		 * Fake a route change error to reset the TLS
 		 * send tag.  This will convert the existing
 		 * tag to a TLS ratelimit tag.
		 */
-		MPASS(tls->snd_tag->sw->type == IF_SND_TAG_TYPE_TLS);
-		ktls_output_eagain(tp->t_inpcb, tls);
+		MPASS(tls->snd_rcv_tag->sw->type == IF_SND_TAG_TYPE_TLS);
+		ktls_output_eagain(tp->t_inpcb);
 	}
 #endif
 	} else {
@@ -1404,8 +1404,8 @@
 	if (tp->t_inpcb->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) {
 		tls = tp->t_inpcb->inp_socket->so_snd.sb_tls_info;
 		MPASS(tls->mode == TCP_TLS_MODE_IFNET);
-		if (tls->snd_tag != NULL &&
-		    tls->snd_tag->sw->type != IF_SND_TAG_TYPE_TLS_RATE_LIMIT) {
+		if (tls->snd_rcv_tag != NULL &&
+		    tls->snd_rcv_tag->sw->type != IF_SND_TAG_TYPE_TLS_RATE_LIMIT) {
 			/*
 			 * NIC probably doesn't support ratelimit TLS
 			 * tags if it didn't allocate one when an
diff --git a/sys/netinet6/ip6_output.c b/sys/netinet6/ip6_output.c
--- a/sys/netinet6/ip6_output.c
+++ b/sys/netinet6/ip6_output.c
@@ -344,7 +344,7 @@
 	 */
 	if (m->m_next != NULL && mbuf_has_tls_session(m->m_next)) {
 		tls = ktls_hold(m->m_next->m_epg_tls);
-		mst = tls->snd_tag;
+		mst = tls->snd_rcv_tag;
 
 		/*
 		 * If a TLS session doesn't have a valid tag, it must
@@ -392,7 +392,7 @@
 #ifdef KERN_TLS
 	if (tls != NULL) {
 		if (error == EAGAIN)
-			error = ktls_output_eagain(inp, tls);
+			error = ktls_output_eagain(inp);
 		ktls_free(tls);
 	}
 #endif
diff --git a/sys/opencrypto/crypto.c b/sys/opencrypto/crypto.c
--- a/sys/opencrypto/crypto.c
+++ b/sys/opencrypto/crypto.c
@@ -1285,14 +1285,17 @@
 		break;
 	case CSP_MODE_DIGEST:
 		KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST ||
-		    crp->crp_op == CRYPTO_OP_VERIFY_DIGEST,
+		    crp->crp_op == CRYPTO_OP_VERIFY_DIGEST ||
+		    crp->crp_op == CRYPTO_OP_SKIP_DIGEST,
 		    ("invalid digest op %x", crp->crp_op));
 		break;
 	case CSP_MODE_AEAD:
 		KASSERT(crp->crp_op ==
 		    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
 		    crp->crp_op ==
-		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
+		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST) ||
+		    crp->crp_op ==
+		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_SKIP_DIGEST),
 		    ("invalid AEAD op %x", crp->crp_op));
 		KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
 		    ("AEAD without a separate IV"));
@@ -1301,7 +1304,9 @@
 		KASSERT(crp->crp_op ==
 		    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
 		    crp->crp_op ==
-		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
+		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST) ||
+		    crp->crp_op ==
+		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_SKIP_DIGEST),
 		    ("invalid ETA op %x", crp->crp_op));
 		break;
 	}
@@ -1359,7 +1364,10 @@
 		    crp->crp_payload_length <= olen,
 		    ("payload outside output buffer"));
 	}
-	if (csp->csp_mode == CSP_MODE_DIGEST ||
+
+	if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+		/* NOP */;
+	else if (csp->csp_mode == CSP_MODE_DIGEST ||
 	    csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
 		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST)
 			len = ilen;
diff --git a/sys/opencrypto/cryptodev.h b/sys/opencrypto/cryptodev.h
--- a/sys/opencrypto/cryptodev.h
+++ b/sys/opencrypto/cryptodev.h
@@ -583,6 +583,7 @@
 #define	CRYPTO_OP_DECOMPRESS	CRYPTO_OP_DECRYPT
 #define	CRYPTO_OP_COMPRESS	CRYPTO_OP_ENCRYPT
 #define	CRYPTO_OP_IS_COMPRESS(op)	((op) & CRYPTO_OP_COMPRESS)
+#define	CRYPTO_OP_SKIP_DIGEST	0x4	/* skip all digest processing */
 
 /*
 * Hints passed to process methods.
 */
diff --git a/sys/opencrypto/cryptodev.c b/sys/opencrypto/cryptodev.c
--- a/sys/opencrypto/cryptodev.c
+++ b/sys/opencrypto/cryptodev.c
@@ -981,7 +981,8 @@
 		}
 	}
 
-	if (cop->mac != NULL && (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) == 0) {
+	if (cop->mac != NULL &&
+	    (crp->crp_op & (CRYPTO_OP_VERIFY_DIGEST | CRYPTO_OP_SKIP_DIGEST)) == 0) {
 		error = copyout((cod->obuf != NULL ? cod->obuf : cod->buf) +
 		    crp->crp_digest_start, cop->mac, cse->hashsize);
 		if (error) {
@@ -1180,7 +1181,7 @@
 		}
 	}
 
-	if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) == 0) {
+	if ((crp->crp_op & (CRYPTO_OP_VERIFY_DIGEST | CRYPTO_OP_SKIP_DIGEST)) == 0) {
 		error = copyout((cod->obuf != NULL ? cod->obuf : cod->buf) +
 		    crp->crp_digest_start, caead->tag, cse->hashsize);
 		if (error) {
diff --git a/sys/opencrypto/cryptosoft.c b/sys/opencrypto/cryptosoft.c
--- a/sys/opencrypto/cryptosoft.c
+++ b/sys/opencrypto/cryptosoft.c
@@ -319,6 +319,9 @@
 	union authctx ctx;
 	int err;
 
+	if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+		return (0);
+
 	sw = &ses->swcr_auth;
 	axf = sw->sw_axf;
 
@@ -396,6 +399,9 @@
 	size_t len;
 	int blksz, error, ivlen, resid;
 
+	if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+		return (0);
+
 	swa = &ses->swcr_auth;
 	axf = swa->sw_axf;
 
@@ -700,6 +706,9 @@
 	const struct auth_hash *axf;
 	int error, ivlen, len;
 
+	if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+		return (0);
+
 	csp = crypto_get_params(crp->crp_session);
 	swa = &ses->swcr_auth;
 	axf = swa->sw_axf;
diff --git a/sys/opencrypto/ktls_ocf.c b/sys/opencrypto/ktls_ocf.c
--- a/sys/opencrypto/ktls_ocf.c
+++ b/sys/opencrypto/ktls_ocf.c
@@ -48,7 +48,7 @@
 #include <opencrypto/cryptodev.h>
 #include <opencrypto/ktls.h>
 
-struct ocf_session {
+struct ktls_ocf_session {
 	crypto_session_t sid;
 	crypto_session_t mac_sid;
 	struct mtx lock;
@@ -64,7 +64,7 @@
 };
 
 struct ocf_operation {
-	struct ocf_session *os;
+	struct ktls_ocf_session *os;
 	bool done;
 };
@@ -142,7 +142,7 @@
 }
 
 static int
-ktls_ocf_dispatch(struct ocf_session *os, struct cryptop *crp)
+ktls_ocf_dispatch(struct ktls_ocf_session *os, struct cryptop *crp)
 {
 	struct ocf_operation oo;
 	int error;
@@ -228,7 +228,7 @@
 	struct uio *uio;
 	struct tls_mac_data *ad;
 	struct cryptop *crp;
-	struct ocf_session *os;
+	struct ktls_ocf_session *os;
 	struct iovec iov[m->m_epg_npgs + 2];
 	u_int pgoff;
 	int i, error;
@@ -237,7 +237,7 @@
 
 	MPASS(outiovcnt + 1 <= nitems(iov));
 
-	os = tls->cipher;
+	os = tls->ocf_session;
 	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
 	crp = &state->crp;
 	uio = &state->uio;
@@ -376,11 +376,11 @@
 	struct uio *uio;
 	struct tls_aead_data *ad;
 	struct cryptop *crp;
-	struct ocf_session *os;
+	struct ktls_ocf_session *os;
 	int error;
 	uint16_t tls_comp_len;
 
-	os = tls->cipher;
+	os = tls->ocf_session;
 	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
 	crp = &state->crp;
 	uio = &state->uio;
@@ -453,16 +453,16 @@
 static int
 ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
     const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
-    int *trailer_len)
+    int *trailer_len, bool verify_digest)
 {
 	struct tls_aead_data ad;
 	struct cryptop crp;
-	struct ocf_session *os;
+	struct ktls_ocf_session *os;
 	struct ocf_operation oo;
 	int error;
 	uint16_t tls_comp_len;
 
-	os = tls->cipher;
+	os = tls->ocf_session;
 
 	oo.os = os;
 	oo.done = false;
@@ -502,7 +502,8 @@
 	crp.crp_payload_length = tls_comp_len;
 	crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;
 
-	crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
+	crp.crp_op = CRYPTO_OP_DECRYPT |
+	    (verify_digest ? CRYPTO_OP_VERIFY_DIGEST : CRYPTO_OP_SKIP_DIGEST);
 	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
 	crypto_use_mbuf(&crp, m);
@@ -526,11 +527,11 @@
 	struct uio *uio;
 	struct tls_aead_data_13 *ad;
 	struct cryptop *crp;
-	struct ocf_session *os;
+	struct ktls_ocf_session *os;
 	char nonce[12];
 	int error;
 
-	os = tls->cipher;
+	os = tls->ocf_session;
 	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
 	crp = &state->crp;
 	uio = &state->uio;
@@ -598,9 +599,9 @@
 void
 ktls_ocf_free(struct ktls_session *tls)
 {
-	struct ocf_session *os;
+	struct ktls_ocf_session *os;
 
-	os = tls->cipher;
+	os = tls->ocf_session;
 	crypto_freesession(os->sid);
 	mtx_destroy(&os->lock);
 	zfree(os, M_KTLS_OCF);
@@ -610,7 +611,7 @@
 ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
 {
 	struct crypto_session_params csp, mac_csp;
-	struct ocf_session *os;
+	struct ktls_ocf_session *os;
 	int error, mac_len;
 
 	memset(&csp, 0, sizeof(csp));
@@ -745,7 +746,7 @@
 	}
 
 	mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
-	tls->cipher = os;
+	tls->ocf_session = os;
 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
 	    tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
 		if (direction == KTLS_TX) {
diff --git a/sys/sys/ktls.h b/sys/sys/ktls.h
--- a/sys/sys/ktls.h
+++ b/sys/sys/ktls.h
@@ -167,6 +167,7 @@
 #define	KTLS_RX		2
 
 struct iovec;
+struct ktls_ocf_session;
 struct ktls_ocf_encrypt_state;
 struct ktls_session;
 struct m_snd_tag;
@@ -181,16 +182,15 @@
 	    struct iovec *outiov, int outiovcnt);
 	int (*sw_decrypt)(struct ktls_session *tls,
 	    const struct tls_record_layer *hdr, struct mbuf *m,
-	    uint64_t seqno, int *trailer_len);
-	};
-	union {
-		void *cipher;
-		struct m_snd_tag *snd_tag;
+	    uint64_t seqno, int *trailer_len, bool verify_digest);
 	};
+	struct ktls_ocf_session *ocf_session;
+	struct m_snd_tag *snd_rcv_tag;
 	struct tls_session_params params;
 	u_int wq_index;
 	volatile u_int refcount;
 	int mode;
+	int direction;
 
 	struct task reset_tag_task;
 	struct task disable_ifnet_task;
@@ -208,6 +208,7 @@
 extern unsigned int ktls_ifnet_max_rexmit_pct;
 
 void ktls_check_rx(struct sockbuf *sb);
+int ktls_mbuf_crypto_state(struct mbuf *mb, int offset, int len);
 void ktls_disable_ifnet(void *arg);
 int ktls_enable_rx(struct socket *so, struct tls_enable *en);
 int ktls_enable_tx(struct socket *so, struct tls_enable *en);
@@ -220,10 +221,11 @@
 int ktls_get_rx_mode(struct socket *so, int *modep);
 int ktls_set_tx_mode(struct socket *so, int mode);
 int ktls_get_tx_mode(struct socket *so, int *modep);
-int ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls);
+int ktls_output_eagain(struct inpcb *inp);
#ifdef RATELIMIT
 int ktls_modify_txrtlmt(struct ktls_session *tls, uint64_t max_pacing_rate);
#endif
+bool ktls_pending_rx_info(struct sockbuf *sb, uint64_t *seqnop, size_t *residp);
 
 static inline struct ktls_session *
 ktls_hold(struct ktls_session *tls)