Page MenuHomeFreeBSD

D32356.id97544.diff
No OneTemporary

D32356.id97544.diff

diff --git a/sys/crypto/aesni/aesni.c b/sys/crypto/aesni/aesni.c
--- a/sys/crypto/aesni/aesni.c
+++ b/sys/crypto/aesni/aesni.c
@@ -855,6 +855,9 @@
const uint8_t *key;
int i, keylen;
+ if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+ return (0);
+
if (crp->crp_auth_key != NULL)
key = crp->crp_auth_key;
else
diff --git a/sys/crypto/blake2/blake2_cryptodev.c b/sys/crypto/blake2/blake2_cryptodev.c
--- a/sys/crypto/blake2/blake2_cryptodev.c
+++ b/sys/crypto/blake2/blake2_cryptodev.c
@@ -342,6 +342,9 @@
int error, rc;
unsigned klen;
+ if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+ return (0);
+
ctx = NULL;
ctxidx = 0;
error = EINVAL;
diff --git a/sys/crypto/ccp/ccp_hardware.c b/sys/crypto/ccp/ccp_hardware.c
--- a/sys/crypto/ccp/ccp_hardware.c
+++ b/sys/crypto/ccp/ccp_hardware.c
@@ -1212,7 +1212,7 @@
s->pending--;
- if (error != 0) {
+ if (error != 0 || (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) != 0) {
crp->crp_etype = error;
goto out;
}
diff --git a/sys/crypto/openssl/ossl.c b/sys/crypto/openssl/ossl.c
--- a/sys/crypto/openssl/ossl.c
+++ b/sys/crypto/openssl/ossl.c
@@ -238,6 +238,9 @@
struct auth_hash *axf;
int error;
+ if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+ return (0);
+
axf = s->hash.axf;
if (crp->crp_auth_key == NULL) {
diff --git a/sys/crypto/via/padlock_hash.c b/sys/crypto/via/padlock_hash.c
--- a/sys/crypto/via/padlock_hash.c
+++ b/sys/crypto/via/padlock_hash.c
@@ -286,6 +286,9 @@
union authctx ctx;
int error;
+ if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+ return (0);
+
axf = ses->ses_axf;
padlock_copy_ctx(axf, ses->ses_ictx, &ctx);
diff --git a/sys/dev/cesa/cesa.c b/sys/dev/cesa/cesa.c
--- a/sys/dev/cesa/cesa.c
+++ b/sys/dev/cesa/cesa.c
@@ -1528,7 +1528,8 @@
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
cr->cr_crp->crp_etype = sc->sc_error;
- if (cr->cr_cs->cs_hlen != 0 && cr->cr_crp->crp_etype == 0) {
+ if (cr->cr_cs->cs_hlen != 0 && cr->cr_crp->crp_etype == 0 &&
+ (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) == 0) {
if (cr->cr_crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
crypto_copydata(cr->cr_crp,
cr->cr_crp->crp_digest_start,
diff --git a/sys/dev/cxgbe/crypto/t4_crypto.c b/sys/dev/cxgbe/crypto/t4_crypto.c
--- a/sys/dev/cxgbe/crypto/t4_crypto.c
+++ b/sys/dev/cxgbe/crypto/t4_crypto.c
@@ -595,7 +595,7 @@
{
uint8_t hash[HASH_MAX_LEN];
- if (error)
+ if (error || (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) != 0)
return (error);
if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
diff --git a/sys/dev/glxsb/glxsb_hash.c b/sys/dev/glxsb/glxsb_hash.c
--- a/sys/dev/glxsb/glxsb_hash.c
+++ b/sys/dev/glxsb/glxsb_hash.c
@@ -70,6 +70,9 @@
union authctx ctx;
int error;
+ if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+ return (0);
+
axf = ses->ses_axf;
bcopy(ses->ses_ictx, &ctx, axf->ctxsize);
error = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
diff --git a/sys/dev/hifn/hifn7751.c b/sys/dev/hifn/hifn7751.c
--- a/sys/dev/hifn/hifn7751.c
+++ b/sys/dev/hifn/hifn7751.c
@@ -2650,7 +2650,7 @@
hifnstats.hst_obytes += cmd->dst_mapsize;
- if (macbuf != NULL) {
+ if (macbuf != NULL && (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) == 0) {
if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
crypto_copydata(crp, crp->crp_digest_start,
cmd->session->hs_mlen, macbuf2);
diff --git a/sys/dev/qat/qat.c b/sys/dev/qat/qat.c
--- a/sys/dev/qat/qat.c
+++ b/sys/dev/qat/qat.c
@@ -1823,7 +1823,8 @@
qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_OBUF);
error = 0;
- if ((auth_sz = qs->qs_auth_mlen) != 0) {
+ if ((auth_sz = qs->qs_auth_mlen) != 0 &&
+ (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) == 0) {
if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
crypto_copydata(crp, crp->crp_digest_start,
auth_sz, icv);
diff --git a/sys/dev/safe/safe.c b/sys/dev/safe/safe.c
--- a/sys/dev/safe/safe.c
+++ b/sys/dev/safe/safe.c
@@ -1317,7 +1317,9 @@
bswap32(re->re_sastate.sa_saved_indigest[2]);
}
- if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+ /* NOP */;
+ else if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
crypto_copydata(crp, crp->crp_digest_start,
ses->ses_mlen, hash);
if (timingsafe_bcmp(re->re_sastate.sa_saved_indigest,
diff --git a/sys/dev/safexcel/safexcel.c b/sys/dev/safexcel/safexcel.c
--- a/sys/dev/safexcel/safexcel.c
+++ b/sys/dev/safexcel/safexcel.c
@@ -2163,7 +2163,7 @@
safexcel_append_segs(segs, nseg, ring->res_data,
crp->crp_payload_start, crp->crp_payload_length);
}
- if (sess->digestlen > 0) {
+ if (sess->digestlen > 0 && (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) == 0) {
if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
safexcel_append_segs(segs, nseg, ring->cmd_data,
crp->crp_digest_start, sess->digestlen);
diff --git a/sys/dev/sec/sec.c b/sys/dev/sec/sec.c
--- a/sys/dev/sec/sec.c
+++ b/sys/dev/sec/sec.c
@@ -570,7 +570,8 @@
crp->crp_etype = desc->sd_error;
if (crp->crp_etype == 0) {
ses = crypto_get_driver_session(crp->crp_session);
- if (ses->ss_mlen != 0) {
+ if (ses->ss_mlen != 0 &&
+ (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) == 0) {
if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
crypto_copydata(crp,
crp->crp_digest_start,
diff --git a/sys/kern/uipc_ktls.c b/sys/kern/uipc_ktls.c
--- a/sys/kern/uipc_ktls.c
+++ b/sys/kern/uipc_ktls.c
@@ -299,7 +299,7 @@
static void ktls_cleanup(struct ktls_session *tls);
#if defined(INET) || defined(INET6)
-static void ktls_reset_send_tag(void *context, int pending);
+static void ktls_reset_send_receive_tag(void *context, int pending);
#endif
static void ktls_work_thread(void *ctx);
static void ktls_alloc_thread(void *ctx);
@@ -503,7 +503,7 @@
#if defined(INET) || defined(INET6)
static int
ktls_create_session(struct socket *so, struct tls_enable *en,
- struct ktls_session **tlsp)
+ struct ktls_session **tlsp, int direction)
{
struct ktls_session *tls;
int error;
@@ -608,9 +608,10 @@
counter_u64_add(ktls_offload_active, 1);
refcount_init(&tls->refcount, 1);
- TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_tag, tls);
+ TASK_INIT(&tls->reset_tag_task, 0, ktls_reset_send_receive_tag, tls);
tls->wq_index = ktls_get_cpu(so);
+ tls->direction = direction;
tls->params.cipher_algorithm = en->cipher_algorithm;
tls->params.auth_algorithm = en->auth_algorithm;
@@ -743,11 +744,12 @@
counter_u64_add(ktls_offload_active, 1);
refcount_init(&tls_new->refcount, 1);
- TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_send_tag, tls_new);
+ TASK_INIT(&tls_new->reset_tag_task, 0, ktls_reset_send_receive_tag, tls_new);
/* Copy fields from existing session. */
tls_new->params = tls->params;
tls_new->wq_index = tls->wq_index;
+ tls_new->direction = tls->direction;
/* Deep copy keys. */
if (tls_new->params.auth_key != NULL) {
@@ -797,8 +799,8 @@
counter_u64_add(ktls_ifnet_chacha20, -1);
break;
}
- if (tls->snd_tag != NULL)
- m_snd_tag_rele(tls->snd_tag);
+ if (tls->snd_rcv_tag != NULL)
+ m_snd_tag_rele(tls->snd_rcv_tag);
break;
#ifdef TCP_OFFLOAD
case TCP_TLS_MODE_TOE:
@@ -980,28 +982,135 @@
return (error);
}
+/*
+ * Common code for allocating a TLS receive tag for doing HW
+ * decryption of TLS data.
+ *
+ * This function allocates a new TLS receive tag on whatever interface
+ * the connection is currently routed over.
+ */
static int
-ktls_try_ifnet(struct socket *so, struct ktls_session *tls, bool force)
+ktls_alloc_rcv_tag(struct inpcb *inp, struct ktls_session *tls, bool force,
+ struct m_snd_tag **mstp)
{
- struct m_snd_tag *mst;
+ union if_snd_tag_alloc_params params;
+ struct ifnet *ifp;
+ struct nhop_object *nh;
+ struct tcpcb *tp;
int error;
- error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst);
- if (error == 0) {
- tls->mode = TCP_TLS_MODE_IFNET;
- tls->snd_tag = mst;
- switch (tls->params.cipher_algorithm) {
- case CRYPTO_AES_CBC:
- counter_u64_add(ktls_ifnet_cbc, 1);
- break;
- case CRYPTO_AES_NIST_GCM_16:
- counter_u64_add(ktls_ifnet_gcm, 1);
- break;
- case CRYPTO_CHACHA20_POLY1305:
- counter_u64_add(ktls_ifnet_chacha20, 1);
- break;
+ INP_RLOCK(inp);
+ if (inp->inp_flags2 & INP_FREED) {
+ INP_RUNLOCK(inp);
+ return (ECONNRESET);
+ }
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ INP_RUNLOCK(inp);
+ return (ECONNRESET);
+ }
+ if (inp->inp_socket == NULL) {
+ INP_RUNLOCK(inp);
+ return (ECONNRESET);
+ }
+ tp = intotcpcb(inp);
+
+ /*
+ * Check administrative controls on ifnet TLS to determine if
+ * ifnet TLS should be denied.
+ *
+ * - Always permit 'force' requests.
+ * - ktls_ifnet_permitted == 0: always deny.
+ */
+ if (!force && ktls_ifnet_permitted == 0) {
+ INP_RUNLOCK(inp);
+ return (ENXIO);
+ }
+
+ /*
+ * XXX: Use the cached route in the inpcb to find the
+ * interface. This should perhaps instead use
+ * rtalloc1_fib(dst, 0, 0, fibnum). Since KTLS is only
+ * enabled after a connection has completed key negotiation in
+ * userland, the cached route will be present in practice.
+ */
+ nh = inp->inp_route.ro_nh;
+ if (nh == NULL) {
+ INP_RUNLOCK(inp);
+ return (ENXIO);
+ }
+ ifp = nh->nh_ifp;
+ if_ref(ifp);
+
+ params.hdr.type = IF_SND_TAG_TYPE_TLS_RX;
+ params.hdr.flowid = inp->inp_flowid;
+ params.hdr.flowtype = inp->inp_flowtype;
+ params.hdr.numa_domain = inp->inp_numa_domain;
+ params.tls_rx.inp = inp;
+ params.tls_rx.tls = tls;
+
+ INP_RUNLOCK(inp);
+
+ if ((ifp->if_capenable & IFCAP_MEXTPG) == 0) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ /* XXX reusing TXTLS flags */
+ if (inp->inp_vflag & INP_IPV6) {
+ if ((ifp->if_capenable & IFCAP_TXTLS6) == 0) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+ } else {
+ if ((ifp->if_capenable & IFCAP_TXTLS4) == 0) {
+ error = EOPNOTSUPP;
+ goto out;
}
}
+ error = m_snd_tag_alloc(ifp, &params, mstp);
+out:
+ if_rele(ifp);
+ return (error);
+}
+
+static int
+ktls_try_ifnet(struct socket *so, struct ktls_session *tls, int direction, bool force)
+{
+ struct m_snd_tag *mst;
+ int error;
+
+ switch (direction) {
+ case KTLS_TX:
+ error = ktls_alloc_snd_tag(so->so_pcb, tls, force, &mst);
+ if (__predict_false(error != 0))
+ goto done;
+ break;
+ case KTLS_RX:
+ error = ktls_alloc_rcv_tag(so->so_pcb, tls, force, &mst);
+ if (__predict_false(error != 0))
+ goto done;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ tls->mode = TCP_TLS_MODE_IFNET;
+ tls->snd_rcv_tag = mst;
+
+ switch (tls->params.cipher_algorithm) {
+ case CRYPTO_AES_CBC:
+ counter_u64_add(ktls_ifnet_cbc, 1);
+ break;
+ case CRYPTO_AES_NIST_GCM_16:
+ counter_u64_add(ktls_ifnet_gcm, 1);
+ break;
+ case CRYPTO_CHACHA20_POLY1305:
+ counter_u64_add(ktls_ifnet_chacha20, 1);
+ break;
+ default:
+ break;
+ }
+done:
return (error);
}
@@ -1187,7 +1296,7 @@
en->tls_vminor == TLS_MINOR_VER_THREE)
return (ENOTSUP);
- error = ktls_create_session(so, en, &tls);
+ error = ktls_create_session(so, en, &tls, KTLS_RX);
if (error)
return (error);
@@ -1210,10 +1319,13 @@
}
SOCKBUF_UNLOCK(&so->so_rcv);
+ /* Prefer TOE -> ifnet TLS -> software TLS. */
#ifdef TCP_OFFLOAD
error = ktls_try_toe(so, tls, KTLS_RX);
if (error)
#endif
+ error = ktls_try_ifnet(so, tls, KTLS_RX, false);
+ if (error)
ktls_use_sw(tls);
counter_u64_add(ktls_offload_total, 1);
@@ -1256,7 +1368,7 @@
if (mb_use_ext_pgs == 0)
return (ENXIO);
- error = ktls_create_session(so, en, &tls);
+ error = ktls_create_session(so, en, &tls, KTLS_TX);
if (error)
return (error);
@@ -1265,7 +1377,7 @@
error = ktls_try_toe(so, tls, KTLS_TX);
if (error)
#endif
- error = ktls_try_ifnet(so, tls, false);
+ error = ktls_try_ifnet(so, tls, KTLS_TX, false);
if (error)
error = ktls_try_sw(so, tls, KTLS_TX);
@@ -1382,7 +1494,7 @@
tls_new = ktls_clone_session(tls);
if (mode == TCP_TLS_MODE_IFNET)
- error = ktls_try_ifnet(so, tls_new, true);
+ error = ktls_try_ifnet(so, tls_new, KTLS_TX, true);
else
error = ktls_try_sw(so, tls_new, KTLS_TX);
if (error) {
@@ -1441,19 +1553,21 @@
}
/*
- * Try to allocate a new TLS send tag. This task is scheduled when
- * ip_output detects a route change while trying to transmit a packet
- * holding a TLS record. If a new tag is allocated, replace the tag
- * in the TLS session. Subsequent packets on the connection will use
- * the new tag. If a new tag cannot be allocated, drop the
- * connection.
+ * ktls_reset_send_receive_tag - try to allocate a new TLS send or receive tag.
+ *
+ * This task is scheduled when ip_output detects a route change while
+ * trying to transmit a packet holding a TLS record. If a new tag is
+ * allocated, replace the tag in the TLS session. Subsequent packets
+ * on the connection will use the new tag. If a new tag cannot be
+ * allocated, drop the connection.
*/
static void
-ktls_reset_send_tag(void *context, int pending)
+ktls_reset_send_receive_tag(void *context, int pending)
{
struct epoch_tracker et;
struct ktls_session *tls;
- struct m_snd_tag *old, *new;
+ struct m_snd_tag *snd_rcv_old;
+ struct m_snd_tag *snd_rcv_new;
struct inpcb *inp;
struct tcpcb *tp;
int error;
@@ -1469,72 +1583,81 @@
* an ifp mismatch and drop packets until a new tag is
* allocated.
*
- * Write-lock the INP when changing tls->snd_tag since
+ * Write-lock the INP when changing tls->snd_rcv_tag since
* ip[6]_output_send() holds a read-lock when reading the
* pointer.
*/
INP_WLOCK(inp);
- old = tls->snd_tag;
- tls->snd_tag = NULL;
+ snd_rcv_old = tls->snd_rcv_tag;
+ tls->snd_rcv_tag = NULL;
INP_WUNLOCK(inp);
- if (old != NULL)
- m_snd_tag_rele(old);
- error = ktls_alloc_snd_tag(inp, tls, true, &new);
+ if (snd_rcv_old != NULL)
+ m_snd_tag_rele(snd_rcv_old);
- if (error == 0) {
- INP_WLOCK(inp);
- tls->snd_tag = new;
- mtx_pool_lock(mtxpool_sleep, tls);
- tls->reset_pending = false;
- mtx_pool_unlock(mtxpool_sleep, tls);
- if (!in_pcbrele_wlocked(inp))
- INP_WUNLOCK(inp);
+ switch (tls->direction) {
+ case KTLS_TX:
+ error = ktls_alloc_snd_tag(inp, tls, true, &snd_rcv_new);
+ break;
+ case KTLS_RX:
+ error = ktls_alloc_rcv_tag(inp, tls, true, &snd_rcv_new);
+ break;
+ default:
+ goto drop_connection;
+ }
+ if (error != 0)
+ goto drop_connection;
- counter_u64_add(ktls_ifnet_reset, 1);
+ INP_WLOCK(inp);
+ tls->snd_rcv_tag = snd_rcv_new;
- /*
- * XXX: Should we kick tcp_output explicitly now that
- * the send tag is fixed or just rely on timers?
- */
- } else {
- NET_EPOCH_ENTER(et);
- INP_WLOCK(inp);
- if (!in_pcbrele_wlocked(inp)) {
- if (!(inp->inp_flags & INP_TIMEWAIT) &&
- !(inp->inp_flags & INP_DROPPED)) {
- tp = intotcpcb(inp);
- CURVNET_SET(tp->t_vnet);
- tp = tcp_drop(tp, ECONNABORTED);
- CURVNET_RESTORE();
- if (tp != NULL)
- INP_WUNLOCK(inp);
- counter_u64_add(ktls_ifnet_reset_dropped, 1);
- } else
- INP_WUNLOCK(inp);
- }
- NET_EPOCH_EXIT(et);
+ mtx_pool_lock(mtxpool_sleep, tls);
+ tls->reset_pending = false;
+ mtx_pool_unlock(mtxpool_sleep, tls);
- counter_u64_add(ktls_ifnet_reset_failed, 1);
+ if (!in_pcbrele_wlocked(inp))
+ INP_WUNLOCK(inp);
- /*
- * Leave reset_pending true to avoid future tasks while
- * the socket goes away.
- */
- }
+ counter_u64_add(ktls_ifnet_reset, 1);
ktls_free(tls);
-}
-int
-ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls)
-{
+	/*
+	 * XXX: Should we kick tcp_output explicitly now that
+	 * the send or receive tag is fixed, or just rely on timers?
+	 */
+ return;
- if (inp == NULL)
- return (ENOBUFS);
+drop_connection:
+ NET_EPOCH_ENTER(et);
+ INP_WLOCK(inp);
+ if (!in_pcbrele_wlocked(inp)) {
+ if (!(inp->inp_flags & INP_TIMEWAIT) &&
+ !(inp->inp_flags & INP_DROPPED)) {
+ tp = intotcpcb(inp);
+ CURVNET_SET(tp->t_vnet);
+ tp = tcp_drop(tp, ECONNABORTED);
+ CURVNET_RESTORE();
+ if (tp != NULL)
+ INP_WUNLOCK(inp);
+ counter_u64_add(ktls_ifnet_reset_dropped, 1);
+ } else
+ INP_WUNLOCK(inp);
+ }
+ NET_EPOCH_EXIT(et);
- INP_LOCK_ASSERT(inp);
+ counter_u64_add(ktls_ifnet_reset_failed, 1);
+ /*
+ * Leave reset_pending true to avoid future tasks while
+ * the socket goes away.
+ */
+ ktls_free(tls);
+}
+
+static void
+ktls_output_eagain_tls(struct inpcb *inp, struct ktls_session *tls)
+{
/*
* See if we should schedule a task to update the send tag for
* this session.
@@ -1548,6 +1671,30 @@
taskqueue_enqueue(taskqueue_thread, &tls->reset_tag_task);
}
mtx_pool_unlock(mtxpool_sleep, tls);
+}
+
+int
+ktls_output_eagain(struct inpcb *inp)
+{
+ struct socket *so;
+ struct ktls_session *tls;
+
+ if (__predict_false(inp == NULL))
+ goto done;
+ INP_LOCK_ASSERT(inp);
+
+ so = inp->inp_socket;
+ if (__predict_false(so == NULL))
+ goto done;
+
+ tls = so->so_rcv.sb_tls_info;
+ if (__predict_true(tls != NULL))
+ ktls_output_eagain_tls(inp, tls);
+
+ tls = so->so_snd.sb_tls_info;
+ if (__predict_true(tls != NULL))
+ ktls_output_eagain_tls(inp, tls);
+done:
return (ENOBUFS);
}
@@ -1566,7 +1713,7 @@
MPASS(tls->mode == TCP_TLS_MODE_IFNET);
- if (tls->snd_tag == NULL) {
+ if (tls->snd_rcv_tag == NULL) {
/*
* Resetting send tag, ignore this change. The
* pending reset may or may not see this updated rate
@@ -1576,10 +1723,11 @@
return (0);
}
- MPASS(tls->snd_tag != NULL);
- MPASS(tls->snd_tag->sw->type == IF_SND_TAG_TYPE_TLS_RATE_LIMIT);
+ mst = tls->snd_rcv_tag;
+
+ MPASS(mst != NULL);
+ MPASS(mst->sw->type == IF_SND_TAG_TYPE_TLS_RATE_LIMIT);
- mst = tls->snd_tag;
return (mst->sw->snd_tag_modify(mst, &params));
}
#endif
@@ -1862,7 +2010,7 @@
return (NULL);
}
}
- n->m_flags |= M_NOTREADY;
+ n->m_flags |= M_NOTREADY | (m->m_flags & M_DECRYPTED);
/* Store remainder in 'n'. */
n->m_len = m->m_len - remain;
@@ -1900,6 +2048,84 @@
return (top);
}
+/*
+ * Check if an mbuf chain is fully decrypted at the given offset
+ * and length. Returns -1 if all data is decrypted, 0 if there is
+ * a mix of encrypted and decrypted data, or 1 if all data is encrypted.
+ */
+int
+ktls_mbuf_crypto_state(struct mbuf *mb, int offset, int len)
+{
+ int m_flags_ored = 0;
+ int m_flags_anded = -1;
+
+ for (; mb != NULL; mb = mb->m_next) {
+ if (offset < mb->m_len)
+ break;
+ offset -= mb->m_len;
+ }
+ offset += len;
+
+ for (; mb != NULL; mb = mb->m_next) {
+ m_flags_ored |= mb->m_flags;
+ m_flags_anded &= mb->m_flags;
+
+ if (offset <= mb->m_len)
+ break;
+ offset -= mb->m_len;
+ }
+ MPASS(mb != NULL || offset == 0);
+
+ if ((m_flags_ored ^ m_flags_anded) & M_DECRYPTED)
+ return (0); /* mixed */
+ else
+ return ((m_flags_ored & M_DECRYPTED) ? -1 : 1);
+}
+
+/*
+ * ktls_resync_ifnet - get HW TLS RX back on track after packet loss
+ */
+static int
+ktls_resync_ifnet(struct socket *so)
+{
+ union if_snd_tag_modify_params params;
+ struct m_snd_tag *mst;
+ struct inpcb *inp;
+ struct tcpcb *tp;
+
+ mst = so->so_rcv.sb_tls_info->snd_rcv_tag;
+ if (__predict_false(mst == NULL))
+ return (EINVAL);
+
+ inp = sotoinpcb(so);
+ if (__predict_false(inp == NULL))
+ return (EINVAL);
+
+ INP_RLOCK(inp);
+ if (inp->inp_flags2 & INP_FREED) {
+ INP_RUNLOCK(inp);
+ return (ECONNRESET);
+ }
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ INP_RUNLOCK(inp);
+ return (ECONNRESET);
+ }
+
+ tp = intotcpcb(inp);
+ MPASS(tp != NULL);
+
+ /* Get the TCP sequence number of the next valid TLS header. */
+ SOCKBUF_LOCK(&so->so_rcv);
+ params.tls_rx.next_tls_hdr_tcp_sn =
+ tp->rcv_nxt - so->so_rcv.sb_tlscc;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ INP_RUNLOCK(inp);
+
+ MPASS(mst->sw->type == IF_SND_TAG_TYPE_TLS_RX);
+ return (mst->sw->snd_tag_modify(mst, &params));
+}
+
static void
ktls_decrypt(struct socket *so)
{
@@ -1911,6 +2137,7 @@
struct mbuf *control, *data, *m;
uint64_t seqno;
int error, remain, tls_len, trail_len;
+ int state;
hdr = (struct tls_record_layer *)tls_header;
sb = &so->so_rcv;
@@ -1972,7 +2199,64 @@
SBCHECK(sb);
SOCKBUF_UNLOCK(sb);
- error = tls->sw_decrypt(tls, hdr, data, seqno, &trail_len);
+ /* get crypto state for this TLS record */
+ state = ktls_mbuf_crypto_state(data, 0, tls_len);
+
+ switch (state) {
+ struct mbuf *m0;
+ struct mbuf *m1;
+ int m0_off;
+ int m1_off;
+ int off;
+ case 0:
+ m0 = data;
+ m1 = m_copypacket(data, M_WAITOK);
+
+ /* Perform XOR of crypto sequence. */
+ error = tls->sw_decrypt(tls, hdr, m1, seqno, &trail_len, false);
+ if (__predict_false(error != 0)) {
+ m_freem(m1);
+ break;
+ }
+
+ /* Reconstruct encrypted mbuf data. */
+ for (off = m0_off = m1_off = 0; off != tls_len; ) {
+ int delta = MIN(m0->m_len - m0_off, m1->m_len - m1_off);
+ int maxlen = tls_len - off - tls->params.tls_tlen;
+
+ if (delta > maxlen)
+ delta = maxlen;
+
+ /* Copy encrypted data back over the decrypted data. */
+ if (m0->m_flags & M_DECRYPTED)
+ m_copydata(m1, m1_off, delta, mtod(m0, caddr_t) + m0_off);
+
+ off += delta;
+ m0_off += delta;
+ m1_off += delta;
+
+ if (m0_off == m0->m_len) {
+ m0 = m0->m_next;
+ m0_off = 0;
+ }
+
+ if (m1_off == m1->m_len) {
+ m1 = m_free(m1);
+ m1_off = 0;
+ }
+ }
+
+ m_freem(m1);
+
+ /* FALLTHROUGH */
+ case 1:
+ error = tls->sw_decrypt(tls, hdr, data, seqno, &trail_len, true);
+ break;
+ default:
+ error = 0;
+ break;
+ }
+
if (error) {
counter_u64_add(ktls_offload_failed_crypto, 1);
@@ -2047,23 +2331,29 @@
data = m_free(data);
}
- /* Trim trailer and clear M_NOTREADY. */
+ /* Trim trailer and clear M_NOTREADY and M_DECRYPTED. */
remain = be16toh(tgr.tls_length);
m = data;
for (m = data; remain > m->m_len; m = m->m_next) {
- m->m_flags &= ~M_NOTREADY;
+ m->m_flags &= ~(M_NOTREADY | M_DECRYPTED);
remain -= m->m_len;
}
m->m_len = remain;
m_freem(m->m_next);
m->m_next = NULL;
- m->m_flags &= ~M_NOTREADY;
+ m->m_flags &= ~(M_NOTREADY | M_DECRYPTED);
/* Set EOR on the final mbuf. */
m->m_flags |= M_EOR;
}
sbappendcontrol_locked(sb, data, control, 0);
+
+ if (__predict_false(state != -1)) {
+ SOCKBUF_UNLOCK(sb);
+ ktls_resync_ifnet(so);
+ SOCKBUF_LOCK(sb);
+ }
}
sb->sb_flags &= ~SB_TLS_RX_RUNNING;
diff --git a/sys/mips/cavium/cryptocteon/cryptocteon.c b/sys/mips/cavium/cryptocteon/cryptocteon.c
--- a/sys/mips/cavium/cryptocteon/cryptocteon.c
+++ b/sys/mips/cavium/cryptocteon/cryptocteon.c
@@ -387,7 +387,8 @@
(*od->octo_decrypt)(od, od->octo_iov, iovcnt, iovlen,
auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
- if (csp->csp_auth_alg != 0) {
+ if (csp->csp_auth_alg != 0 &&
+ (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) == 0) {
if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
crypto_copydata(crp, crp->crp_digest_start,
od->octo_mlen, icv2);
diff --git a/sys/mips/nlm/dev/sec/nlmsec.c b/sys/mips/nlm/dev/sec/nlmsec.c
--- a/sys/mips/nlm/dev/sec/nlmsec.c
+++ b/sys/mips/nlm/dev/sec/nlmsec.c
@@ -284,7 +284,8 @@
sc->sc_needwakeup &= ~CRYPTO_SYMQ;
}
}
- if (cmd->hash_dst_len != 0) {
+ if (cmd->hash_dst_len != 0 &&
+ (crp->crp_op & CRYPTO_OP_SKIP_DIGEST) == 0) {
if (cmd->crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
crypto_copydata(cmd->crp, cmd->crp->crp_digest_start,
cmd->hash_dst_len, hash);
diff --git a/sys/net/if_lagg.c b/sys/net/if_lagg.c
--- a/sys/net/if_lagg.c
+++ b/sys/net/if_lagg.c
@@ -1764,6 +1764,14 @@
.type = IF_SND_TAG_TYPE_TLS
};
+static const struct if_snd_tag_sw lagg_snd_tag_tls_rx_sw = {
+ .snd_tag_modify = lagg_snd_tag_modify,
+ .snd_tag_query = lagg_snd_tag_query,
+ .snd_tag_free = lagg_snd_tag_free,
+ .next_snd_tag = lagg_next_snd_tag,
+ .type = IF_SND_TAG_TYPE_TLS_RX
+};
+
#ifdef RATELIMIT
static const struct if_snd_tag_sw lagg_snd_tag_tls_rl_sw = {
.snd_tag_modify = lagg_snd_tag_modify,
@@ -1852,6 +1860,9 @@
case IF_SND_TAG_TYPE_TLS:
sw = &lagg_snd_tag_tls_sw;
break;
+ case IF_SND_TAG_TYPE_TLS_RX:
+ sw = &lagg_snd_tag_tls_rx_sw;
+ break;
#ifdef RATELIMIT
case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
sw = &lagg_snd_tag_tls_rl_sw;
diff --git a/sys/net/if_var.h b/sys/net/if_var.h
--- a/sys/net/if_var.h
+++ b/sys/net/if_var.h
@@ -192,7 +192,8 @@
#define IF_SND_TAG_TYPE_UNLIMITED 1
#define IF_SND_TAG_TYPE_TLS 2
#define IF_SND_TAG_TYPE_TLS_RATE_LIMIT 3
-#define IF_SND_TAG_TYPE_MAX 4
+#define IF_SND_TAG_TYPE_TLS_RX 4
+#define IF_SND_TAG_TYPE_MAX 5
struct if_snd_tag_alloc_header {
uint32_t type; /* send tag type, see IF_SND_TAG_XXX */
@@ -229,11 +230,17 @@
uint32_t flags; /* M_NOWAIT or M_WAITOK */
};
+struct if_snd_tag_modify_tls_rx {
+ /* TCP sequence number in host endian format */
+ uint32_t next_tls_hdr_tcp_sn;
+};
+
union if_snd_tag_alloc_params {
struct if_snd_tag_alloc_header hdr;
struct if_snd_tag_alloc_rate_limit rate_limit;
struct if_snd_tag_alloc_rate_limit unlimited;
struct if_snd_tag_alloc_tls tls;
+ struct if_snd_tag_alloc_tls tls_rx;
struct if_snd_tag_alloc_tls_rate_limit tls_rate_limit;
};
@@ -241,6 +248,7 @@
struct if_snd_tag_rate_limit_params rate_limit;
struct if_snd_tag_rate_limit_params unlimited;
struct if_snd_tag_rate_limit_params tls_rate_limit;
+ struct if_snd_tag_modify_tls_rx tls_rx;
};
union if_snd_tag_query_params {
diff --git a/sys/netinet/ip_output.c b/sys/netinet/ip_output.c
--- a/sys/netinet/ip_output.c
+++ b/sys/netinet/ip_output.c
@@ -231,7 +231,7 @@
*/
if (m->m_next != NULL && mbuf_has_tls_session(m->m_next)) {
tls = ktls_hold(m->m_next->m_epg_tls);
- mst = tls->snd_tag;
+ mst = tls->snd_rcv_tag;
/*
* If a TLS session doesn't have a valid tag, it must
@@ -279,7 +279,7 @@
#ifdef KERN_TLS
if (tls != NULL) {
if (error == EAGAIN)
- error = ktls_output_eagain(inp, tls);
+ error = ktls_output_eagain(inp);
ktls_free(tls);
}
#endif
diff --git a/sys/netinet/tcp_ratelimit.c b/sys/netinet/tcp_ratelimit.c
--- a/sys/netinet/tcp_ratelimit.c
+++ b/sys/netinet/tcp_ratelimit.c
@@ -1354,14 +1354,14 @@
if (rte)
rl_increment_using(rte);
#ifdef KERN_TLS
- if (rte != NULL && tls != NULL && tls->snd_tag != NULL) {
+ if (rte != NULL && tls != NULL && tls->snd_rcv_tag != NULL) {
/*
* Fake a route change error to reset the TLS
* send tag. This will convert the existing
* tag to a TLS ratelimit tag.
*/
- MPASS(tls->snd_tag->sw->type == IF_SND_TAG_TYPE_TLS);
- ktls_output_eagain(tp->t_inpcb, tls);
+ MPASS(tls->snd_rcv_tag->sw->type == IF_SND_TAG_TYPE_TLS);
+ ktls_output_eagain(tp->t_inpcb);
}
#endif
} else {
@@ -1404,8 +1404,8 @@
if (tp->t_inpcb->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) {
tls = tp->t_inpcb->inp_socket->so_snd.sb_tls_info;
MPASS(tls->mode == TCP_TLS_MODE_IFNET);
- if (tls->snd_tag != NULL &&
- tls->snd_tag->sw->type != IF_SND_TAG_TYPE_TLS_RATE_LIMIT) {
+ if (tls->snd_rcv_tag != NULL &&
+ tls->snd_rcv_tag->sw->type != IF_SND_TAG_TYPE_TLS_RATE_LIMIT) {
/*
* NIC probably doesn't support ratelimit TLS
* tags if it didn't allocate one when an
diff --git a/sys/netinet6/ip6_output.c b/sys/netinet6/ip6_output.c
--- a/sys/netinet6/ip6_output.c
+++ b/sys/netinet6/ip6_output.c
@@ -344,7 +344,7 @@
*/
if (m->m_next != NULL && mbuf_has_tls_session(m->m_next)) {
tls = ktls_hold(m->m_next->m_epg_tls);
- mst = tls->snd_tag;
+ mst = tls->snd_rcv_tag;
/*
* If a TLS session doesn't have a valid tag, it must
@@ -392,7 +392,7 @@
#ifdef KERN_TLS
if (tls != NULL) {
if (error == EAGAIN)
- error = ktls_output_eagain(inp, tls);
+ error = ktls_output_eagain(inp);
ktls_free(tls);
}
#endif
diff --git a/sys/opencrypto/crypto.c b/sys/opencrypto/crypto.c
--- a/sys/opencrypto/crypto.c
+++ b/sys/opencrypto/crypto.c
@@ -1285,14 +1285,17 @@
break;
case CSP_MODE_DIGEST:
KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST ||
- crp->crp_op == CRYPTO_OP_VERIFY_DIGEST,
+ crp->crp_op == CRYPTO_OP_VERIFY_DIGEST ||
+ crp->crp_op == CRYPTO_OP_SKIP_DIGEST,
("invalid digest op %x", crp->crp_op));
break;
case CSP_MODE_AEAD:
KASSERT(crp->crp_op ==
(CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
crp->crp_op ==
- (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
+ (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST) ||
+ crp->crp_op ==
+ (CRYPTO_OP_DECRYPT | CRYPTO_OP_SKIP_DIGEST),
("invalid AEAD op %x", crp->crp_op));
KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
("AEAD without a separate IV"));
@@ -1301,7 +1304,9 @@
KASSERT(crp->crp_op ==
(CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
crp->crp_op ==
- (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
+ (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST) ||
+ crp->crp_op ==
+ (CRYPTO_OP_DECRYPT | CRYPTO_OP_SKIP_DIGEST),
("invalid ETA op %x", crp->crp_op));
break;
}
@@ -1359,7 +1364,10 @@
crp->crp_payload_length <= olen,
("payload outside output buffer"));
}
- if (csp->csp_mode == CSP_MODE_DIGEST ||
+
+ if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+ /* NOP */;
+ else if (csp->csp_mode == CSP_MODE_DIGEST ||
csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST)
len = ilen;
diff --git a/sys/opencrypto/cryptodev.h b/sys/opencrypto/cryptodev.h
--- a/sys/opencrypto/cryptodev.h
+++ b/sys/opencrypto/cryptodev.h
@@ -583,6 +583,7 @@
#define CRYPTO_OP_DECOMPRESS CRYPTO_OP_DECRYPT
#define CRYPTO_OP_COMPRESS CRYPTO_OP_ENCRYPT
#define CRYPTO_OP_IS_COMPRESS(op) ((op) & CRYPTO_OP_COMPRESS)
+#define CRYPTO_OP_SKIP_DIGEST 0x4 /* skip all digest processing */
/*
* Hints passed to process methods.
diff --git a/sys/opencrypto/cryptodev.c b/sys/opencrypto/cryptodev.c
--- a/sys/opencrypto/cryptodev.c
+++ b/sys/opencrypto/cryptodev.c
@@ -981,7 +981,8 @@
}
}
- if (cop->mac != NULL && (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) == 0) {
+ if (cop->mac != NULL &&
+ (crp->crp_op & (CRYPTO_OP_VERIFY_DIGEST | CRYPTO_OP_SKIP_DIGEST)) == 0) {
error = copyout((cod->obuf != NULL ? cod->obuf : cod->buf) +
crp->crp_digest_start, cop->mac, cse->hashsize);
if (error) {
@@ -1180,7 +1181,7 @@
}
}
- if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) == 0) {
+ if ((crp->crp_op & (CRYPTO_OP_VERIFY_DIGEST | CRYPTO_OP_SKIP_DIGEST)) == 0) {
error = copyout((cod->obuf != NULL ? cod->obuf : cod->buf) +
crp->crp_digest_start, caead->tag, cse->hashsize);
if (error) {
diff --git a/sys/opencrypto/cryptosoft.c b/sys/opencrypto/cryptosoft.c
--- a/sys/opencrypto/cryptosoft.c
+++ b/sys/opencrypto/cryptosoft.c
@@ -319,6 +319,9 @@
union authctx ctx;
int err;
+ if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+ return (0);
+
sw = &ses->swcr_auth;
axf = sw->sw_axf;
@@ -396,6 +399,9 @@
size_t len;
int blksz, error, ivlen, resid;
+ if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+ return (0);
+
swa = &ses->swcr_auth;
axf = swa->sw_axf;
@@ -700,6 +706,9 @@
const struct auth_hash *axf;
int error, ivlen, len;
+ if (crp->crp_op & CRYPTO_OP_SKIP_DIGEST)
+ return (0);
+
csp = crypto_get_params(crp->crp_session);
swa = &ses->swcr_auth;
axf = swa->sw_axf;
diff --git a/sys/opencrypto/ktls_ocf.c b/sys/opencrypto/ktls_ocf.c
--- a/sys/opencrypto/ktls_ocf.c
+++ b/sys/opencrypto/ktls_ocf.c
@@ -453,7 +453,7 @@
static int
ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
- int *trailer_len)
+ int *trailer_len, bool verify_digest)
{
struct tls_aead_data ad;
struct cryptop crp;
@@ -502,7 +502,8 @@
crp.crp_payload_length = tls_comp_len;
crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;
- crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
+ crp.crp_op = CRYPTO_OP_DECRYPT |
+ (verify_digest ? CRYPTO_OP_VERIFY_DIGEST : CRYPTO_OP_SKIP_DIGEST);
crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
crypto_use_mbuf(&crp, m);
diff --git a/sys/sys/ktls.h b/sys/sys/ktls.h
--- a/sys/sys/ktls.h
+++ b/sys/sys/ktls.h
@@ -182,14 +182,15 @@
struct iovec *outiov, int outiovcnt);
int (*sw_decrypt)(struct ktls_session *tls,
const struct tls_record_layer *hdr, struct mbuf *m,
- uint64_t seqno, int *trailer_len);
+ uint64_t seqno, int *trailer_len, bool verify_digest);
};
struct ktls_ocf_session *ocf_session;
- struct m_snd_tag *snd_tag;
+ struct m_snd_tag *snd_rcv_tag;
struct tls_session_params params;
u_int wq_index;
volatile u_int refcount;
int mode;
+ int direction;
struct task reset_tag_task;
struct task disable_ifnet_task;
@@ -207,6 +208,7 @@
extern unsigned int ktls_ifnet_max_rexmit_pct;
void ktls_check_rx(struct sockbuf *sb);
+int ktls_mbuf_crypto_state(struct mbuf *mb, int offset, int len);
void ktls_disable_ifnet(void *arg);
int ktls_enable_rx(struct socket *so, struct tls_enable *en);
int ktls_enable_tx(struct socket *so, struct tls_enable *en);
@@ -219,7 +221,7 @@
int ktls_get_rx_mode(struct socket *so, int *modep);
int ktls_set_tx_mode(struct socket *so, int mode);
int ktls_get_tx_mode(struct socket *so, int *modep);
-int ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls);
+int ktls_output_eagain(struct inpcb *inp);
#ifdef RATELIMIT
int ktls_modify_txrtlmt(struct ktls_session *tls, uint64_t max_pacing_rate);
#endif

File Metadata

Mime Type
text/plain
Expires
Mon, Nov 10, 1:06 PM (4 h, 4 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
25108360
Default Alt Text
D32356.id97544.diff (32 KB)

Event Timeline