diff --git a/sys/conf/files b/sys/conf/files
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -4932,6 +4932,7 @@
 opencrypto/cryptodeflate.c	optional crypto | ipsec | ipsec_support
 opencrypto/gmac.c		optional crypto | ipsec | ipsec_support
 opencrypto/gfmult.c		optional crypto | ipsec | ipsec_support
+opencrypto/ktls_ocf.c		optional kern_tls
 opencrypto/rmd160.c		optional crypto | ipsec | ipsec_support
 opencrypto/xform.c		optional crypto | ipsec | ipsec_support
 opencrypto/xform_cbc_mac.c	optional crypto
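With ktls_ocf.c now built whenever the kern_tls option is set, a kernel configured for KTLS picks up the OCF software backend automatically instead of loading it as a module. A minimal kernel-config sketch (the ident line is illustrative; GENERIC already carries device crypto on most platforms):

	include	GENERIC
	ident	KTLS-TEST
	options	KERN_TLS
	device	crypto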
diff --git a/sys/kern/uipc_ktls.c b/sys/kern/uipc_ktls.c
--- a/sys/kern/uipc_ktls.c
+++ b/sys/kern/uipc_ktls.c
@@ -93,8 +93,6 @@
 struct ktls_domain_info ktls_domains[MAXMEMDOM];
 static struct ktls_wq *ktls_wq;
 static struct proc *ktls_proc;
-LIST_HEAD(, ktls_crypto_backend) ktls_backends;
-static struct rmlock ktls_backends_lock;
 static uma_zone_t ktls_session_zone;
 static uma_zone_t ktls_buffer_zone;
 static uint16_t ktls_cpuid_lookup[MAXCPU];
@@ -104,10 +102,6 @@
 SYSCTL_NODE(_kern_ipc_tls, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
     "Kernel TLS offload stats");
 
-static int ktls_allow_unload;
-SYSCTL_INT(_kern_ipc_tls, OID_AUTO, allow_unload, CTLFLAG_RDTUN,
-    &ktls_allow_unload, 0, "Allow software crypto modules to unload");
-
 #ifdef RSS
 static int ktls_bind_threads = 1;
 #else
@@ -270,76 +264,6 @@
 #endif
 
 static void ktls_work_thread(void *ctx);
-
-int
-ktls_crypto_backend_register(struct ktls_crypto_backend *be)
-{
-	struct ktls_crypto_backend *curr_be, *tmp;
-
-	if (be->api_version != KTLS_API_VERSION) {
-		printf("KTLS: API version mismatch (%d vs %d) for %s\n",
-		    be->api_version, KTLS_API_VERSION,
-		    be->name);
-		return (EINVAL);
-	}
-
-	rm_wlock(&ktls_backends_lock);
-	printf("KTLS: Registering crypto method %s with prio %d\n",
-	    be->name, be->prio);
-	if (LIST_EMPTY(&ktls_backends)) {
-		LIST_INSERT_HEAD(&ktls_backends, be, next);
-	} else {
-		LIST_FOREACH_SAFE(curr_be, &ktls_backends, next, tmp) {
-			if (curr_be->prio < be->prio) {
-				LIST_INSERT_BEFORE(curr_be, be, next);
-				break;
-			}
-			if (LIST_NEXT(curr_be, next) == NULL) {
-				LIST_INSERT_AFTER(curr_be, be, next);
-				break;
-			}
-		}
-	}
-	rm_wunlock(&ktls_backends_lock);
-	return (0);
-}
-
-int
-ktls_crypto_backend_deregister(struct ktls_crypto_backend *be)
-{
-	struct ktls_crypto_backend *tmp;
-
-	/*
-	 * Don't error if the backend isn't registered.  This permits
-	 * MOD_UNLOAD handlers to use this function unconditionally.
-	 */
-	rm_wlock(&ktls_backends_lock);
-	LIST_FOREACH(tmp, &ktls_backends, next) {
-		if (tmp == be)
-			break;
-	}
-	if (tmp == NULL) {
-		rm_wunlock(&ktls_backends_lock);
-		return (0);
-	}
-
-	if (!ktls_allow_unload) {
-		rm_wunlock(&ktls_backends_lock);
-		printf(
-		    "KTLS: Deregistering crypto method %s is not supported\n",
-		    be->name);
-		return (EBUSY);
-	}
-
-	if (be->use_count) {
-		rm_wunlock(&ktls_backends_lock);
-		return (EBUSY);
-	}
-
-	LIST_REMOVE(be, next);
-	rm_wunlock(&ktls_backends_lock);
-	return (0);
-}
 
 #if defined(INET) || defined(INET6)
 static u_int
@@ -358,7 +282,7 @@
 #endif
 	/*
 	 * Just use the flowid to shard connections in a repeatable
-	 * fashion.  Note that some crypto backends rely on the
+	 * fashion.  Note that TLS 1.0 sessions rely on the
 	 * serialization provided by having the same connection use
 	 * the same queue.
	 */
@@ -426,9 +350,6 @@
 	cpuset_t mask;
 	int count, domain, error, i;
 
-	rm_init(&ktls_backends_lock, "ktls backends");
-	LIST_INIT(&ktls_backends);
-
 	ktls_wq = malloc(sizeof(*ktls_wq) * (mp_maxid + 1), M_KTLS,
 	    M_WAITOK | M_ZERO);
 
@@ -761,7 +682,6 @@
 	counter_u64_add(ktls_offload_active, -1);
 	switch (tls->mode) {
 	case TCP_TLS_MODE_SW:
-		MPASS(tls->be != NULL);
 		switch (tls->params.cipher_algorithm) {
 		case CRYPTO_AES_CBC:
 			counter_u64_add(ktls_sw_cbc, -1);
@@ -773,7 +693,7 @@
 			counter_u64_add(ktls_sw_chacha20, -1);
 			break;
 		}
-		tls->free(tls);
+		ktls_ocf_free(tls);
 		break;
 	case TCP_TLS_MODE_IFNET:
 		switch (tls->params.cipher_algorithm) {
@@ -996,33 +916,11 @@
 static int
 ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction)
 {
-	struct rm_priotracker prio;
-	struct ktls_crypto_backend *be;
+	int error;
 
-	/*
-	 * Choose the best software crypto backend.  Backends are
-	 * stored in sorted priority order (larget value == most
-	 * important at the head of the list), so this just stops on
-	 * the first backend that claims the session by returning
-	 * success.
-	 */
-	if (ktls_allow_unload)
-		rm_rlock(&ktls_backends_lock, &prio);
-	LIST_FOREACH(be, &ktls_backends, next) {
-		if (be->try(so, tls, direction) == 0)
-			break;
-		KASSERT(tls->cipher == NULL,
-		    ("ktls backend leaked a cipher pointer"));
-	}
-	if (be != NULL) {
-		if (ktls_allow_unload)
-			be->use_count++;
-		tls->be = be;
-	}
-	if (ktls_allow_unload)
-		rm_runlock(&ktls_backends_lock, &prio);
-	if (be == NULL)
-		return (EOPNOTSUPP);
+	error = ktls_ocf_try(so, tls, direction);
+	if (error)
+		return (error);
 	tls->mode = TCP_TLS_MODE_SW;
 	switch (tls->params.cipher_algorithm) {
 	case CRYPTO_AES_CBC:
@@ -1531,14 +1429,8 @@
 void
 ktls_destroy(struct ktls_session *tls)
 {
-	struct rm_priotracker prio;
 
 	ktls_cleanup(tls);
 
-	if (tls->be != NULL && ktls_allow_unload) {
-		rm_rlock(&ktls_backends_lock, &prio);
-		tls->be->use_count--;
-		rm_runlock(&ktls_backends_lock, &prio);
-	}
 	uma_zfree(ktls_session_zone, tls);
 }
 
@@ -1621,7 +1513,7 @@
 			 *
 			 * Compute the final trailer length assuming
 			 * at most one block of padding.
-			 * tls->params.sb_tls_tlen is the maximum
+			 * tls->params.tls_tlen is the maximum
 			 * possible trailer length (padding + digest).
 			 * delta holds the number of excess padding
 			 * bytes if the maximum were used.  Those
@@ -2096,12 +1988,10 @@
 	struct socket *so;
 	struct mbuf *m;
 	vm_paddr_t parray[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
-	struct iovec src_iov[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
 	struct iovec dst_iov[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
 	vm_page_t pg;
 	void *cbuf;
 	int error, i, len, npages, off, total_pages;
-	bool is_anon;
 
 	so = top->m_epg_so;
 	tls = top->m_epg_tls;
@@ -2146,75 +2036,45 @@
 		    m->m_epg_npgs, ktls_maxlen));
 
 		/*
-		 * Generate source and destination ivoecs to pass to
-		 * the SW encryption backend.  For writable mbufs, the
-		 * destination iovec is a copy of the source and
-		 * encryption is done in place.  For file-backed mbufs
-		 * (from sendfile), anonymous wired pages are
-		 * allocated and assigned to the destination iovec.
+		 * For anonymous mbufs, encryption is done in place.
+		 * For file-backed mbufs (from sendfile), anonymous
+		 * wired pages are allocated and used as the
+		 * encryption destination.
		 */
-		is_anon = (m->m_epg_flags & EPG_FLAG_ANON) != 0;
-
-		off = m->m_epg_1st_off;
-		for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
-			len = m_epg_pagelen(m, i, off);
-			src_iov[i].iov_len = len;
-			src_iov[i].iov_base =
-			    (char *)(void *)PHYS_TO_DMAP(m->m_epg_pa[i]) + off;
-		}
-
-		if (is_anon) {
-			memcpy(dst_iov, src_iov, i * sizeof(struct iovec));
-		} else if ((cbuf = ktls_buffer_alloc(wq, m)) != NULL) {
-			len = ptoa(m->m_epg_npgs - 1) + m->m_epg_last_len -
-			    m->m_epg_1st_off;
-			dst_iov[0].iov_base = (char *)cbuf + m->m_epg_1st_off;
-			dst_iov[0].iov_len = len;
-			parray[0] = DMAP_TO_PHYS((vm_offset_t)cbuf);
-			i = 1;
+		if ((m->m_epg_flags & EPG_FLAG_ANON) != 0) {
+			error = (*tls->sw_encrypt)(tls, m, NULL, 0);
 		} else {
-			cbuf = NULL;
-			off = m->m_epg_1st_off;
-			for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
-				do {
-					pg = vm_page_alloc(NULL, 0,
-					    VM_ALLOC_NORMAL |
-					    VM_ALLOC_NOOBJ |
-					    VM_ALLOC_NODUMP |
-					    VM_ALLOC_WIRED |
-					    VM_ALLOC_WAITFAIL);
-				} while (pg == NULL);
-
-				len = m_epg_pagelen(m, i, off);
-				parray[i] = VM_PAGE_TO_PHYS(pg);
-				dst_iov[i].iov_base =
-				    (char *)(void *)PHYS_TO_DMAP(
-				    parray[i]) + off;
-				dst_iov[i].iov_len = len;
+			if ((cbuf = ktls_buffer_alloc(wq, m)) != NULL) {
+				len = ptoa(m->m_epg_npgs - 1) +
+				    m->m_epg_last_len - m->m_epg_1st_off;
+				dst_iov[0].iov_base = (char *)cbuf +
+				    m->m_epg_1st_off;
+				dst_iov[0].iov_len = len;
+				parray[0] = DMAP_TO_PHYS((vm_offset_t)cbuf);
+				i = 1;
+			} else {
+				off = m->m_epg_1st_off;
+				for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
+					do {
+						pg = vm_page_alloc(NULL, 0,
+						    VM_ALLOC_NORMAL |
+						    VM_ALLOC_NOOBJ |
+						    VM_ALLOC_NODUMP |
+						    VM_ALLOC_WIRED |
+						    VM_ALLOC_WAITFAIL);
+					} while (pg == NULL);
+
+					len = m_epg_pagelen(m, i, off);
+					parray[i] = VM_PAGE_TO_PHYS(pg);
+					dst_iov[i].iov_base =
+					    (char *)(void *)PHYS_TO_DMAP(
+					    parray[i]) + off;
+					dst_iov[i].iov_len = len;
+				}
 			}
-		}
-
-		if (__predict_false(m->m_epg_npgs == 0)) {
-			/* TLS 1.0 empty fragment. */
-			npages++;
-		} else
-			npages += m->m_epg_npgs;
+			error = (*tls->sw_encrypt)(tls, m, dst_iov, i);
 
-		error = (*tls->sw_encrypt)(tls,
-		    (const struct tls_record_layer *)m->m_epg_hdr,
-		    m->m_epg_trail, src_iov, dst_iov, m->m_epg_npgs, i,
-		    m->m_epg_seqno, m->m_epg_record_type);
-		if (error) {
-			counter_u64_add(ktls_offload_failed_crypto, 1);
-			break;
-		}
-
-		/*
-		 * For file-backed mbufs, release the file-backed
-		 * pages and replace them in the ext_pgs array with
-		 * the anonymous wired pages allocated above.
-		 */
-		if (!is_anon) {
 			/* Free the old pages. */
 			m->m_ext.ext_free(m);
@@ -2236,6 +2096,16 @@
 			/* Pages are now writable. */
 			m->m_epg_flags |= EPG_FLAG_ANON;
 		}
+		if (error) {
+			counter_u64_add(ktls_offload_failed_crypto, 1);
+			break;
+		}
+
+		if (__predict_false(m->m_epg_npgs == 0)) {
+			/* TLS 1.0 empty fragment. */
+			npages++;
+		} else
+			npages += m->m_epg_npgs;
 
 		/*
 		 * Drop a reference to the session now that it is no
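The reworked ktls_encrypt() above also pins down the new sw_encrypt contract: all record metadata (header, trailer, sequence number, record type) now travels with the M_EXTPG mbuf itself, and a NULL output iovec requests in-place encryption. A minimal sketch restating that convention (the wrapper name is hypothetical, not part of this change):

	/*
	 * Hypothetical wrapper mirroring the dispatch in ktls_encrypt():
	 * anonymous (writable) page mbufs are encrypted in place, while
	 * file-backed mbufs supply a separate destination iovec array
	 * describing freshly wired anonymous pages.
	 */
	static int
	example_encrypt_record(struct ktls_session *tls, struct mbuf *m,
	    struct iovec *dst_iov, int iovcnt)
	{
		if ((m->m_epg_flags & EPG_FLAG_ANON) != 0)
			return ((*tls->sw_encrypt)(tls, m, NULL, 0));
		return ((*tls->sw_encrypt)(tls, m, dst_iov, iovcnt));
	}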
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -206,7 +206,6 @@
 	khelp \
 	krpc \
 	ksyms \
-	${_ktls_ocf} \
 	le \
 	lge \
 	libalias \
@@ -422,7 +421,6 @@
 _cryptodev=	cryptodev
 _random_fortuna=random_fortuna
 _random_other=	random_other
-_ktls_ocf=	ktls_ocf
 .endif
 .endif
 
diff --git a/sys/modules/ktls_ocf/Makefile b/sys/modules/ktls_ocf/Makefile
deleted file mode 100644
--- a/sys/modules/ktls_ocf/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# $FreeBSD$
-
-.PATH:	${SRCTOP}/sys/opencrypto
-
-KMOD=	ktls_ocf
-SRCS=	ktls_ocf.c
-
-.include <bsd.kmod.mk>
diff --git a/sys/opencrypto/ktls_ocf.c b/sys/opencrypto/ktls_ocf.c
--- a/sys/opencrypto/ktls_ocf.c
+++ b/sys/opencrypto/ktls_ocf.c
@@ -37,17 +37,21 @@
 #include <sys/ktls.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
+#include <sys/mbuf.h>
 #include <sys/module.h>
 #include <sys/mutex.h>
 #include <sys/sysctl.h>
 #include <sys/uio.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_param.h>
 #include <opencrypto/cryptodev.h>
 
 struct ocf_session {
 	crypto_session_t sid;
 	crypto_session_t mac_sid;
-	int mac_len;
 	struct mtx lock;
+	int mac_len;
 	bool implicit_iv;
 
 	/* Only used for TLS 1.0 with the implicit IV. */
@@ -176,33 +180,35 @@
 }
 
 static int
-ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls,
-    const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
-    struct iovec *outiov, int iniovcnt, int outiovcnt, uint64_t seqno,
-    uint8_t record_type __unused)
+ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls, struct mbuf *m,
+    struct iovec *outiov, int outiovcnt)
 {
-	struct uio uio, out_uio;
+	const struct tls_record_layer *hdr;
+	struct uio uio;
 	struct tls_mac_data ad;
 	struct cryptop crp;
 	struct ocf_session *os;
-	struct iovec iov[iniovcnt + 2];
-	struct iovec out_iov[outiovcnt + 1];
+	struct iovec iov[m->m_epg_npgs + 2];
+	u_int pgoff;
 	int i, error;
 	uint16_t tls_comp_len;
 	uint8_t pad;
-	bool inplace;
+
+	MPASS(outiovcnt + 1 <= nitems(iov));
 
 	os = tls->cipher;
+	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
 
 #ifdef INVARIANTS
 	if (os->implicit_iv) {
 		mtx_lock(&os->lock);
 		KASSERT(!os->in_progress,
 		    ("concurrent implicit IV encryptions"));
-		if (os->next_seqno != seqno) {
+		if (os->next_seqno != m->m_epg_seqno) {
 			printf("KTLS CBC: TLS records out of order.  "
 			    "Expected %ju, got %ju\n",
-			    (uintmax_t)os->next_seqno, (uintmax_t)seqno);
+			    (uintmax_t)os->next_seqno,
+			    (uintmax_t)m->m_epg_seqno);
 			mtx_unlock(&os->lock);
 			return (EINVAL);
 		}
@@ -211,24 +217,11 @@
 	}
 #endif
 
-	/*
-	 * Compute the payload length.
-	 *
-	 * XXX: This could be easily computed O(1) from the mbuf
-	 * fields, but we don't have those accessible here.  Can
-	 * at least compute inplace as well while we are here.
-	 */
-	tls_comp_len = 0;
-	inplace = iniovcnt == outiovcnt;
-	for (i = 0; i < iniovcnt; i++) {
-		tls_comp_len += iniov[i].iov_len;
-		if (inplace &&
-		    (i >= outiovcnt || iniov[i].iov_base != outiov[i].iov_base))
-			inplace = false;
-	}
+	/* Payload length. */
+	tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
 
 	/* Initialize the AAD. */
-	ad.seq = htobe64(seqno);
+	ad.seq = htobe64(m->m_epg_seqno);
 	ad.type = hdr->tls_type;
 	ad.tls_vmajor = hdr->tls_vmajor;
 	ad.tls_vminor = hdr->tls_vminor;
@@ -237,11 +230,16 @@
 	/* First, compute the MAC. */
 	iov[0].iov_base = &ad;
 	iov[0].iov_len = sizeof(ad);
-	memcpy(&iov[1], iniov, sizeof(*iniov) * iniovcnt);
-	iov[iniovcnt + 1].iov_base = trailer;
-	iov[iniovcnt + 1].iov_len = os->mac_len;
+	pgoff = m->m_epg_1st_off;
+	for (i = 0; i < m->m_epg_npgs; i++, pgoff = 0) {
+		iov[i + 1].iov_base = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] +
+		    pgoff);
+		iov[i + 1].iov_len = m_epg_pagelen(m, i, pgoff);
+	}
+	iov[m->m_epg_npgs + 1].iov_base = m->m_epg_trail;
+	iov[m->m_epg_npgs + 1].iov_len = os->mac_len;
 	uio.uio_iov = iov;
-	uio.uio_iovcnt = iniovcnt + 2;
+	uio.uio_iovcnt = m->m_epg_npgs + 2;
 	uio.uio_offset = 0;
 	uio.uio_segflg = UIO_SYSSPACE;
 	uio.uio_td = curthread;
@@ -269,54 +267,47 @@
 	}
 
 	/* Second, add the padding. */
-	pad = (unsigned)(AES_BLOCK_LEN - (tls_comp_len + os->mac_len + 1)) %
-	    AES_BLOCK_LEN;
+	pad = m->m_epg_trllen - os->mac_len - 1;
 	for (i = 0; i < pad + 1; i++)
-		trailer[os->mac_len + i] = pad;
+		m->m_epg_trail[os->mac_len + i] = pad;
 
 	/* Finally, encrypt the record. */
-
-	/*
-	 * Don't recopy the input iovec, instead just adjust the
-	 * trailer length and skip over the AAD vector in the uio.
-	 */
-	iov[iniovcnt + 1].iov_len += pad + 1;
-	uio.uio_iov = iov + 1;
-	uio.uio_iovcnt = iniovcnt + 1;
-	uio.uio_resid = tls_comp_len + iov[iniovcnt + 1].iov_len;
-	KASSERT(uio.uio_resid % AES_BLOCK_LEN == 0,
-	    ("invalid encryption size"));
-
 	crypto_initreq(&crp, os->sid);
-	crp.crp_payload_start = 0;
-	crp.crp_payload_length = uio.uio_resid;
+	crp.crp_payload_start = m->m_epg_hdrlen;
+	crp.crp_payload_length = tls_comp_len + m->m_epg_trllen;
+	KASSERT(crp.crp_payload_length % AES_BLOCK_LEN == 0,
+	    ("invalid encryption size"));
+	crypto_use_single_mbuf(&crp, m);
 	crp.crp_op = CRYPTO_OP_ENCRYPT;
 	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
 	if (os->implicit_iv)
 		memcpy(crp.crp_iv, os->iv, AES_BLOCK_LEN);
 	else
 		memcpy(crp.crp_iv, hdr + 1, AES_BLOCK_LEN);
-	crypto_use_uio(&crp, &uio);
-	if (!inplace) {
-		memcpy(out_iov, outiov, sizeof(*outiov) * outiovcnt);
-		out_iov[outiovcnt] = iov[iniovcnt + 1];
-		out_uio.uio_iov = out_iov;
-		out_uio.uio_iovcnt = outiovcnt + 1;
-		out_uio.uio_offset = 0;
-		out_uio.uio_segflg = UIO_SYSSPACE;
-		out_uio.uio_td = curthread;
-		out_uio.uio_resid = uio.uio_resid;
-		crypto_use_output_uio(&crp, &out_uio);
+
+	if (outiov != NULL) {
+		/* Duplicate iovec and append vector for trailer. */
+		memcpy(iov, outiov, outiovcnt * sizeof(struct iovec));
+		iov[outiovcnt].iov_base = m->m_epg_trail;
+		iov[outiovcnt].iov_len = m->m_epg_trllen;
+
+		uio.uio_iov = iov;
+		uio.uio_iovcnt = outiovcnt + 1;
+		uio.uio_offset = 0;
+		uio.uio_segflg = UIO_SYSSPACE;
+		uio.uio_td = curthread;
+		uio.uio_resid = crp.crp_payload_length;
+		crypto_use_output_uio(&crp, &uio);
 	}
 
 	if (os->implicit_iv)
 		counter_u64_add(ocf_tls10_cbc_crypts, 1);
 	else
 		counter_u64_add(ocf_tls11_cbc_crypts, 1);
-	if (inplace)
-		counter_u64_add(ocf_inplace, 1);
-	else
+	if (outiov != NULL)
 		counter_u64_add(ocf_separate_output, 1);
+	else
+		counter_u64_add(ocf_inplace, 1);
 	error = ktls_ocf_dispatch(os, &crp);
 
 	crypto_destroyreq(&crp);
@@ -324,11 +315,11 @@
 	if (os->implicit_iv) {
 		KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
 		    ("trailer too short to read IV"));
-		memcpy(os->iv, trailer + os->mac_len + pad + 1 - AES_BLOCK_LEN,
+		memcpy(os->iv, m->m_epg_trail + m->m_epg_trllen - AES_BLOCK_LEN,
 		    AES_BLOCK_LEN);
 #ifdef INVARIANTS
 		mtx_lock(&os->lock);
-		os->next_seqno = seqno + 1;
+		os->next_seqno = m->m_epg_seqno + 1;
 		os->in_progress = false;
 		mtx_unlock(&os->lock);
 #endif
@@ -337,33 +328,20 @@
 }
 
 static int
-ktls_ocf_tls12_aead_encrypt(struct ktls_session *tls,
-    const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
-    struct iovec *outiov, int iniovcnt, int outiovcnt, uint64_t seqno,
-    uint8_t record_type __unused)
+ktls_ocf_tls12_aead_encrypt(struct ktls_session *tls, struct mbuf *m,
+    struct iovec *outiov, int outiovcnt)
 {
-	struct uio uio, out_uio, *tag_uio;
+	const struct tls_record_layer *hdr;
+	struct uio uio;
 	struct tls_aead_data ad;
 	struct cryptop crp;
 	struct ocf_session *os;
 	struct iovec iov[outiovcnt + 1];
-	int i, error;
+	int error;
 	uint16_t tls_comp_len;
-	bool inplace;
 
 	os = tls->cipher;
-
-	uio.uio_iov = iniov;
-	uio.uio_iovcnt = iniovcnt;
-	uio.uio_offset = 0;
-	uio.uio_segflg = UIO_SYSSPACE;
-	uio.uio_td = curthread;
-
-	out_uio.uio_iov = outiov;
-	out_uio.uio_iovcnt = outiovcnt;
-	out_uio.uio_offset = 0;
-	out_uio.uio_segflg = UIO_SYSSPACE;
-	out_uio.uio_td = curthread;
+	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
 
 	crypto_initreq(&crp, os->sid);
 
@@ -379,16 +357,12 @@
 	 * 1.3.
	 */
 		memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
-		*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
+		*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(m->m_epg_seqno);
 	}
 
 	/* Setup the AAD. */
-	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
-		tls_comp_len = ntohs(hdr->tls_length) -
-		    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
-	else
-		tls_comp_len = ntohs(hdr->tls_length) - POLY1305_HASH_LEN;
-	ad.seq = htobe64(seqno);
+	tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
+	ad.seq = htobe64(m->m_epg_seqno);
 	ad.type = hdr->tls_type;
 	ad.tls_vmajor = hdr->tls_vmajor;
 	ad.tls_vminor = hdr->tls_vminor;
@@ -396,46 +370,39 @@
 	crp.crp_aad = &ad;
 	crp.crp_aad_length = sizeof(ad);
 
-	/* Compute payload length and determine if encryption is in place. */
-	inplace = iniovcnt == outiovcnt;
-	crp.crp_payload_start = 0;
-	for (i = 0; i < iniovcnt; i++) {
-		if (inplace &&
-		    (i >= outiovcnt || iniov[i].iov_base != outiov[i].iov_base))
-			inplace = false;
-		crp.crp_payload_length += iniov[i].iov_len;
-	}
-	uio.uio_resid = crp.crp_payload_length;
-	out_uio.uio_resid = crp.crp_payload_length;
-
-	if (inplace)
-		tag_uio = &uio;
-	else
-		tag_uio = &out_uio;
+	/* Set fields for input payload. */
+	crypto_use_single_mbuf(&crp, m);
+	crp.crp_payload_start = m->m_epg_hdrlen;
+	crp.crp_payload_length = tls_comp_len;
 
-	/* Duplicate iovec and append vector for tag. */
-	memcpy(iov, tag_uio->uio_iov, outiovcnt * sizeof(struct iovec));
-	iov[outiovcnt].iov_base = trailer;
-	iov[outiovcnt].iov_len = tls->params.tls_tlen;
-	tag_uio->uio_iov = iov;
-	tag_uio->uio_iovcnt++;
-	crp.crp_digest_start = tag_uio->uio_resid;
-	tag_uio->uio_resid += tls->params.tls_tlen;
+	if (outiov != NULL) {
+		/* Duplicate iovec and append vector for tag. */
+		memcpy(iov, outiov, outiovcnt * sizeof(struct iovec));
+		iov[outiovcnt].iov_base = m->m_epg_trail;
+		iov[outiovcnt].iov_len = tls->params.tls_tlen;
+		crp.crp_digest_start = crp.crp_payload_length;
+
+		uio.uio_iov = iov;
+		uio.uio_iovcnt = outiovcnt + 1;
+		uio.uio_offset = 0;
+		uio.uio_segflg = UIO_SYSSPACE;
+		uio.uio_td = curthread;
+		uio.uio_resid = crp.crp_payload_length + tls->params.tls_tlen;
+		crypto_use_output_uio(&crp, &uio);
+	} else
+		crp.crp_digest_start = crp.crp_payload_start +
+		    crp.crp_payload_length;
 
 	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
 	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
-	crypto_use_uio(&crp, &uio);
-	if (!inplace)
-		crypto_use_output_uio(&crp, &out_uio);
-
 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
 		counter_u64_add(ocf_tls12_gcm_crypts, 1);
 	else
 		counter_u64_add(ocf_tls12_chacha20_crypts, 1);
-	if (inplace)
-		counter_u64_add(ocf_inplace, 1);
-	else
+	if (outiov != NULL)
 		counter_u64_add(ocf_separate_output, 1);
+	else
+		counter_u64_add(ocf_inplace, 1);
 	error = ktls_ocf_dispatch(os, &crp);
 
 	crypto_destroyreq(&crp);
@@ -510,27 +477,26 @@
 }
 
 static int
-ktls_ocf_tls13_aead_encrypt(struct ktls_session *tls,
-    const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
-    struct iovec *outiov, int iniovcnt, int outiovcnt, uint64_t seqno,
-    uint8_t record_type)
+ktls_ocf_tls13_aead_encrypt(struct ktls_session *tls, struct mbuf *m,
+    struct iovec *outiov, int outiovcnt)
 {
-	struct uio uio, out_uio;
+	const struct tls_record_layer *hdr;
+	struct uio uio;
 	struct tls_aead_data_13 ad;
 	char nonce[12];
 	struct cryptop crp;
 	struct ocf_session *os;
-	struct iovec iov[iniovcnt + 1], out_iov[outiovcnt + 1];
-	int i, error;
-	bool inplace;
+	struct iovec iov[outiovcnt + 1];
+	int error;
 
 	os = tls->cipher;
+	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
 
 	crypto_initreq(&crp, os->sid);
 
 	/* Setup the nonce. */
 	memcpy(nonce, tls->params.iv, tls->params.iv_len);
-	*(uint64_t *)(nonce + 4) ^= htobe64(seqno);
+	*(uint64_t *)(nonce + 4) ^= htobe64(m->m_epg_seqno);
 
 	/* Setup the AAD. */
 	ad.type = hdr->tls_type;
@@ -540,51 +506,32 @@
 	crp.crp_aad = &ad;
 	crp.crp_aad_length = sizeof(ad);
 
-	/* Compute payload length and determine if encryption is in place. */
-	inplace = iniovcnt == outiovcnt;
-	crp.crp_payload_start = 0;
-	for (i = 0; i < iniovcnt; i++) {
-		if (inplace && (i >= outiovcnt ||
-		    iniov[i].iov_base != outiov[i].iov_base))
-			inplace = false;
-		crp.crp_payload_length += iniov[i].iov_len;
-	}
+	/* Set fields for input payload. */
+	crypto_use_single_mbuf(&crp, m);
+	crp.crp_payload_start = m->m_epg_hdrlen;
+	crp.crp_payload_length = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
 
 	/* Store the record type as the first byte of the trailer. */
-	trailer[0] = record_type;
+	m->m_epg_trail[0] = m->m_epg_record_type;
 	crp.crp_payload_length++;
-	crp.crp_digest_start = crp.crp_payload_length;
 
-	/*
-	 * Duplicate the input iov to append the trailer.  Always
-	 * include the full trailer as input to get the record_type
-	 * even if only the first byte is used.
-	 */
-	memcpy(iov, iniov, iniovcnt * sizeof(*iov));
-	iov[iniovcnt].iov_base = trailer;
-	iov[iniovcnt].iov_len = tls->params.tls_tlen;
-	uio.uio_iov = iov;
-	uio.uio_iovcnt = iniovcnt + 1;
-	uio.uio_offset = 0;
-	uio.uio_resid = crp.crp_payload_length + tls->params.tls_tlen - 1;
-	uio.uio_segflg = UIO_SYSSPACE;
-	uio.uio_td = curthread;
-	crypto_use_uio(&crp, &uio);
-
-	if (!inplace) {
-		/* Duplicate the output iov to append the trailer. */
-		memcpy(out_iov, outiov, outiovcnt * sizeof(*out_iov));
-		out_iov[outiovcnt] = iov[iniovcnt];
-
-		out_uio.uio_iov = out_iov;
-		out_uio.uio_iovcnt = outiovcnt + 1;
-		out_uio.uio_offset = 0;
-		out_uio.uio_resid = crp.crp_payload_length +
-		    tls->params.tls_tlen - 1;
-		out_uio.uio_segflg = UIO_SYSSPACE;
-		out_uio.uio_td = curthread;
-		crypto_use_output_uio(&crp, &out_uio);
-	}
+	if (outiov != NULL) {
+		/* Duplicate iovec and append vector for tag. */
+		memcpy(iov, outiov, outiovcnt * sizeof(struct iovec));
+		iov[outiovcnt].iov_base = m->m_epg_trail;
+		iov[outiovcnt].iov_len = tls->params.tls_tlen;
+		crp.crp_digest_start = crp.crp_payload_length;
+
+		uio.uio_iov = iov;
+		uio.uio_iovcnt = outiovcnt + 1;
+		uio.uio_offset = 0;
+		uio.uio_segflg = UIO_SYSSPACE;
+		uio.uio_td = curthread;
+		uio.uio_resid = m->m_len - m->m_epg_hdrlen;
+		crypto_use_output_uio(&crp, &uio);
+	} else
+		crp.crp_digest_start = crp.crp_payload_start +
+		    crp.crp_payload_length;
 
 	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
 	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
@@ -595,17 +542,17 @@
 		counter_u64_add(ocf_tls13_gcm_crypts, 1);
 	else
 		counter_u64_add(ocf_tls13_chacha20_crypts, 1);
-	if (inplace)
-		counter_u64_add(ocf_inplace, 1);
-	else
+	if (outiov != NULL)
 		counter_u64_add(ocf_separate_output, 1);
+	else
+		counter_u64_add(ocf_inplace, 1);
 	error = ktls_ocf_dispatch(os, &crp);
 
 	crypto_destroyreq(&crp);
 	return (error);
 }
 
-static void
+void
 ktls_ocf_free(struct ktls_session *tls)
 {
 	struct ocf_session *os;
@@ -616,7 +563,7 @@
 	zfree(os, M_KTLS_OCF);
 }
 
-static int
+int
 ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
 {
 	struct crypto_session_params csp, mac_csp;
@@ -773,34 +720,5 @@
 			memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
 		}
 	}
-	tls->free = ktls_ocf_free;
 	return (0);
 }
-
-struct ktls_crypto_backend ocf_backend = {
-	.name = "OCF",
-	.prio = 5,
-	.api_version = KTLS_API_VERSION,
-	.try = ktls_ocf_try,
-};
-
-static int
-ktls_ocf_modevent(module_t mod, int what, void *arg)
-{
-	switch (what) {
-	case MOD_LOAD:
-		return (ktls_crypto_backend_register(&ocf_backend));
-	case MOD_UNLOAD:
-		return (ktls_crypto_backend_deregister(&ocf_backend));
-	default:
-		return (EOPNOTSUPP);
-	}
-}
-
-static moduledata_t ktls_ocf_moduledata = {
-	"ktls_ocf",
-	ktls_ocf_modevent,
-	NULL
-};
-
-DECLARE_MODULE(ktls_ocf, ktls_ocf_moduledata, SI_SUB_PROTO_END, SI_ORDER_ANY);
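A note on the CBC change above: the old code derived the pad length with a modulo expression, while the new code reads it straight off the trailer length, relying on the framing code (see the tls_tlen comment in uipc_ktls.c above) having sized m_epg_trllen as MAC plus minimal padding, i.e. mac_len + pad + 1. A standalone userspace check of that equivalence (illustrative only, not kernel code):

	#include <assert.h>

	#define	AES_BLOCK_LEN	16

	int
	main(void)
	{
		unsigned len, mac_len = 20;	/* e.g. HMAC-SHA1 */

		for (len = 0; len < 4 * AES_BLOCK_LEN; len++) {
			/* Old form: pad so payload + MAC + pad byte fill a block. */
			unsigned char pad = (unsigned)(AES_BLOCK_LEN -
			    (len + mac_len + 1)) % AES_BLOCK_LEN;
			/* Trailer as sized by the framing code. */
			unsigned trllen = mac_len + pad + 1;

			/* New form recovers the same pad from the trailer length. */
			assert(pad == trllen - mac_len - 1);
			/* And the encrypted payload stays block aligned. */
			assert((len + trllen) % AES_BLOCK_LEN == 0);
		}
		return (0);
	}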
diff --git a/sys/sys/ktls.h b/sys/sys/ktls.h
--- a/sys/sys/ktls.h
+++ b/sys/sys/ktls.h
@@ -166,8 +166,6 @@
 #define	KTLS_TX		1
 #define	KTLS_RX		2
 
-#define	KTLS_API_VERSION 8
-
 struct iovec;
 struct ktls_session;
 struct m_snd_tag;
@@ -175,21 +173,10 @@
 struct sockbuf;
 struct socket;
 
-struct ktls_crypto_backend {
-	LIST_ENTRY(ktls_crypto_backend) next;
-	int (*try)(struct socket *so, struct ktls_session *tls, int direction);
-	int prio;
-	int api_version;
-	int use_count;
-	const char *name;
-};
-
 struct ktls_session {
 	union {
-		int	(*sw_encrypt)(struct ktls_session *tls,
-		    const struct tls_record_layer *hdr, uint8_t *trailer,
-		    struct iovec *src, struct iovec *dst, int srciovcnt,
-		    int dstiovcnt, uint64_t seqno, uint8_t record_type);
+		int	(*sw_encrypt)(struct ktls_session *tls, struct mbuf *m,
+		    struct iovec *dst, int iovcnt);
 		int	(*sw_decrypt)(struct ktls_session *tls,
 		    const struct tls_record_layer *hdr, struct mbuf *m,
 		    uint64_t seqno, int *trailer_len);
@@ -198,26 +185,24 @@
 		void *cipher;
 		struct m_snd_tag *snd_tag;
 	};
-	struct ktls_crypto_backend *be;
-	void (*free)(struct ktls_session *tls);
 	struct tls_session_params params;
 	u_int	wq_index;
 	volatile u_int refcount;
 	int mode;
 
+	bool reset_pending;
 	struct task reset_tag_task;
 	struct inpcb *inp;
-	bool reset_pending;
 } __aligned(CACHE_LINE_SIZE);
 
 void ktls_check_rx(struct sockbuf *sb);
-int ktls_crypto_backend_register(struct ktls_crypto_backend *be);
-int ktls_crypto_backend_deregister(struct ktls_crypto_backend *be);
 int ktls_enable_rx(struct socket *so, struct tls_enable *en);
 int ktls_enable_tx(struct socket *so, struct tls_enable *en);
 void ktls_destroy(struct ktls_session *tls);
 void ktls_frame(struct mbuf *m, struct ktls_session *tls, int *enqueue_cnt,
     uint8_t record_type);
+void ktls_ocf_free(struct ktls_session *tls);
+int ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction);
 void ktls_seq(struct sockbuf *sb, struct mbuf *m);
 void ktls_enqueue(struct mbuf *m, struct socket *so, int page_count);
 void ktls_enqueue_to_free(struct mbuf *m);
diff --git a/sys/sys/param.h b/sys/sys/param.h
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -76,7 +76,7 @@
  * cannot include sys/param.h and should only be updated here.
  */
 #undef __FreeBSD_version
-#define __FreeBSD_version 1400015
+#define __FreeBSD_version 1400016
 
 /*
  * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
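Both AEAD paths above derive the per-record nonce the same way: the 64-bit record sequence number is XORed, big-endian, into the last eight bytes of the session's static IV. A standalone sketch of that derivation, equivalent to the *(uint64_t *)(nonce + 4) ^= htobe64(...) lines in ktls_ocf.c (the function name is illustrative):

	#include <stdint.h>
	#include <string.h>

	static void
	tls_aead_nonce(const uint8_t iv[12], uint64_t seqno, uint8_t nonce[12])
	{
		int i;

		memcpy(nonce, iv, 12);
		/* XOR the big-endian sequence number into bytes 4..11. */
		for (i = 0; i < 8; i++)
			nonce[4 + i] ^= (uint8_t)(seqno >> (56 - 8 * i));
	}

Since the sequence number increments per record, the derived nonce never repeats within a session, which is what makes a single static IV per session safe for AES-GCM and ChaCha20-Poly1305.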