Index: sys/crypto/aesni/aesni.c =================================================================== --- sys/crypto/aesni/aesni.c +++ sys/crypto/aesni/aesni.c @@ -253,7 +253,7 @@ struct aesni_softc *sc; sc = device_get_softc(dev); - if (csp->csp_flags != 0) + if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT)) != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: @@ -677,15 +677,17 @@ aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp, const struct crypto_session_params *csp) { - uint8_t iv[AES_BLOCK_LEN], tag[GMAC_DIGEST_LEN], *buf, *authbuf; + uint8_t iv[AES_BLOCK_LEN], tag[GMAC_DIGEST_LEN]; + uint8_t *authbuf, *buf, *outbuf; int error; - bool encflag, allocated, authallocated; + bool encflag, allocated, authallocated, outallocated, outcopy; buf = aesni_cipher_alloc(crp, crp->crp_payload_start, crp->crp_payload_length, &allocated); if (buf == NULL) return (ENOMEM); + outallocated = false; authallocated = false; authbuf = NULL; if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16 || @@ -698,6 +700,29 @@ } } + if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { + outbuf = crypto_buffer_contiguous_subsegment(&crp->crp_obuf, + crp->crp_payload_output_start, crp->crp_payload_length); + if (outbuf == NULL) { + outcopy = true; + if (allocated) + outbuf = buf; + else { + outbuf = malloc(crp->crp_payload_length, + M_AESNI, M_NOWAIT); + if (outbuf == NULL) { + error = ENOMEM; + goto out; + } + outallocated = true; + } + } else + outcopy = false; + } else { + outbuf = buf; + outcopy = allocated; + } + error = 0; encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op); if (crp->crp_cipher_key != NULL) @@ -710,30 +735,33 @@ case CRYPTO_AES_CBC: if (encflag) aesni_encrypt_cbc(ses->rounds, ses->enc_schedule, - crp->crp_payload_length, buf, buf, iv); - else + crp->crp_payload_length, buf, outbuf, iv); + else { + if (buf != outbuf) + memcpy(outbuf, buf, crp->crp_payload_length); aesni_decrypt_cbc(ses->rounds, ses->dec_schedule, - crp->crp_payload_length, buf, iv); + crp->crp_payload_length, outbuf, iv); + } break; case CRYPTO_AES_ICM: /* encryption & decryption are the same */ aesni_encrypt_icm(ses->rounds, ses->enc_schedule, - crp->crp_payload_length, buf, buf, iv); + crp->crp_payload_length, buf, outbuf, iv); break; case CRYPTO_AES_XTS: if (encflag) aesni_encrypt_xts(ses->rounds, ses->enc_schedule, ses->xts_schedule, crp->crp_payload_length, buf, - buf, iv); + outbuf, iv); else aesni_decrypt_xts(ses->rounds, ses->dec_schedule, ses->xts_schedule, crp->crp_payload_length, buf, - buf, iv); + outbuf, iv); break; case CRYPTO_AES_NIST_GCM_16: if (encflag) { memset(tag, 0, sizeof(tag)); - AES_GCM_encrypt(buf, buf, authbuf, iv, tag, + AES_GCM_encrypt(buf, outbuf, authbuf, iv, tag, crp->crp_payload_length, crp->crp_aad_length, csp->csp_ivlen, ses->enc_schedule, ses->rounds); crypto_copyback(crp, crp->crp_digest_start, sizeof(tag), @@ -741,7 +769,7 @@ } else { crypto_copydata(crp, crp->crp_digest_start, sizeof(tag), tag); - if (!AES_GCM_decrypt(buf, buf, authbuf, iv, tag, + if (!AES_GCM_decrypt(buf, outbuf, authbuf, iv, tag, crp->crp_payload_length, crp->crp_aad_length, csp->csp_ivlen, ses->enc_schedule, ses->rounds)) error = EBADMSG; @@ -750,7 +778,7 @@ case CRYPTO_AES_CCM_16: if (encflag) { memset(tag, 0, sizeof(tag)); - AES_CCM_encrypt(buf, buf, authbuf, iv, tag, + AES_CCM_encrypt(buf, outbuf, authbuf, iv, tag, crp->crp_payload_length, crp->crp_aad_length, csp->csp_ivlen, ses->enc_schedule, ses->rounds); crypto_copyback(crp, crp->crp_digest_start, sizeof(tag), @@ -758,16 +786,17 @@ } else { crypto_copydata(crp, 
crp->crp_digest_start, sizeof(tag), tag); - if (!AES_CCM_decrypt(buf, buf, authbuf, iv, tag, + if (!AES_CCM_decrypt(buf, outbuf, authbuf, iv, tag, crp->crp_payload_length, crp->crp_aad_length, csp->csp_ivlen, ses->enc_schedule, ses->rounds)) error = EBADMSG; } break; } - if (allocated && error == 0) - crypto_copyback(crp, crp->crp_payload_start, - crp->crp_payload_length, buf); + if (outcopy && error == 0) + crypto_copyback(crp, CRYPTO_HAS_OUTPUT_BUFFER(crp) ? + crp->crp_payload_output_start : crp->crp_payload_start, + crp->crp_payload_length, outbuf); out: if (allocated) { @@ -778,6 +807,10 @@ explicit_bzero(authbuf, crp->crp_aad_length); free(authbuf, M_AESNI); } + if (outallocated) { + explicit_bzero(outbuf, crp->crp_payload_length); + free(outbuf, M_AESNI); + } return (error); } @@ -813,10 +846,18 @@ crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, __DECONST(int (*)(void *, void *, u_int), ses->hash_update), &sctx); - crypto_apply(crp, crp->crp_payload_start, - crp->crp_payload_length, - __DECONST(int (*)(void *, void *, u_int), ses->hash_update), - &sctx); + if (CRYPTO_HAS_OUTPUT_BUFFER(crp) && + CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) + crypto_apply_buf(&crp->crp_obuf, + crp->crp_payload_output_start, + crp->crp_payload_length, + __DECONST(int (*)(void *, void *, u_int), + ses->hash_update), &sctx); + else + crypto_apply(crp, crp->crp_payload_start, + crp->crp_payload_length, + __DECONST(int (*)(void *, void *, u_int), + ses->hash_update), &sctx); ses->hash_finalize(res, &sctx); /* Outer hash: (K ^ OPAD) || inner hash */ @@ -834,10 +875,18 @@ crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, __DECONST(int (*)(void *, void *, u_int), ses->hash_update), &sctx); - crypto_apply(crp, crp->crp_payload_start, - crp->crp_payload_length, - __DECONST(int (*)(void *, void *, u_int), ses->hash_update), - &sctx); + if (CRYPTO_HAS_OUTPUT_BUFFER(crp) && + CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) + crypto_apply_buf(&crp->crp_obuf, + crp->crp_payload_output_start, + crp->crp_payload_length, + __DECONST(int (*)(void *, void *, u_int), + ses->hash_update), &sctx); + else + crypto_apply(crp, crp->crp_payload_start, + crp->crp_payload_length, + __DECONST(int (*)(void *, void *, u_int), + ses->hash_update), &sctx); ses->hash_finalize(res, &sctx); } Index: sys/crypto/ccp/ccp.c =================================================================== --- sys/crypto/ccp/ccp.c +++ sys/crypto/ccp/ccp.c @@ -91,20 +91,20 @@ * crypto operation buffer. 
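 *
 * (Taking a 'struct crypto_buffer' instead of the whole 'struct
 * cryptop' lets the same helper describe either buffer of a request.
 * A hedged sketch; 'cq_sg_dst' is hypothetical, since ccp itself maps
 * only the input buffer in this patch:
 *
 *	error = ccp_populate_sglist(qp->cq_sg_crp, &crp->crp_buf);
 *	if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp))
 *		error = ccp_populate_sglist(qp->cq_sg_dst, &crp->crp_obuf);
 * )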
*/ static int -ccp_populate_sglist(struct sglist *sg, struct cryptop *crp) +ccp_populate_sglist(struct sglist *sg, struct crypto_buffer *cb) { int error; sglist_reset(sg); - switch (crp->crp_buf_type) { + switch (cb->cb_type) { case CRYPTO_BUF_MBUF: - error = sglist_append_mbuf(sg, crp->crp_mbuf); + error = sglist_append_mbuf(sg, cb->cb_mbuf); break; case CRYPTO_BUF_UIO: - error = sglist_append_uio(sg, crp->crp_uio); + error = sglist_append_uio(sg, cb->cb_uio); break; case CRYPTO_BUF_CONTIG: - error = sglist_append(sg, crp->crp_buf, crp->crp_ilen); + error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len); break; default: error = EINVAL; @@ -607,7 +607,7 @@ goto out; qpheld = true; - error = ccp_populate_sglist(qp->cq_sg_crp, crp); + error = ccp_populate_sglist(qp->cq_sg_crp, &crp->crp_buf); if (error != 0) goto out; Index: sys/dev/cxgbe/crypto/t4_crypto.c =================================================================== --- sys/dev/cxgbe/crypto/t4_crypto.c +++ sys/dev/cxgbe/crypto/t4_crypto.c @@ -193,13 +193,16 @@ /* * Pre-allocate S/G lists used when preparing a work request. - * 'sg_crp' contains an sglist describing the entire buffer - * for a 'struct cryptop'. 'sg_ulptx' is used to describe - * the data the engine should DMA as input via ULPTX_SGL. - * 'sg_dsgl' is used to describe the destination that cipher - * text and a tag should be written to. + * 'sg_input' contains an sglist describing the entire input + * buffer for a 'struct cryptop'. 'sg_output' contains an + * sglist describing the entire output buffer. 'sg_ulptx' is + * used to describe the data the engine should DMA as input + * via ULPTX_SGL. 'sg_dsgl' is used to describe the + * destination that cipher text and a tag should be written + * to. */ - struct sglist *sg_crp; + struct sglist *sg_input; + struct sglist *sg_output; struct sglist *sg_ulptx; struct sglist *sg_dsgl; @@ -247,26 +250,26 @@ * requests. * * These scatter/gather lists can describe different subsets of the - * buffer described by the crypto operation. ccr_populate_sglist() - * generates a scatter/gather list that covers the entire crypto + * buffers described by the crypto operation. ccr_populate_sglist() + * generates a scatter/gather list that covers an entire crypto * operation buffer that is then used to construct the other * scatter/gather lists. 
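 *
 * (With separate output buffers the destination S/G list is built from
 * 'sg_output' when CRYPTO_HAS_OUTPUT_BUFFER(crp) is true and from
 * 'sg_input' otherwise; the pattern, as used for each request type
 * below:
 *
 *	if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
 *		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output,
 *		    crp->crp_payload_output_start,
 *		    crp->crp_payload_length);
 *	else
 *		error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input,
 *		    crp->crp_payload_start, crp->crp_payload_length);
 * )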
*/ static int -ccr_populate_sglist(struct sglist *sg, struct cryptop *crp) +ccr_populate_sglist(struct sglist *sg, struct crypto_buffer *cb) { int error; sglist_reset(sg); - switch (crp->crp_buf_type) { + switch (cb->cb_type) { case CRYPTO_BUF_MBUF: - error = sglist_append_mbuf(sg, crp->crp_mbuf); + error = sglist_append_mbuf(sg, cb->cb_mbuf); break; case CRYPTO_BUF_UIO: - error = sglist_append_uio(sg, crp->crp_uio); + error = sglist_append_uio(sg, cb->cb_uio); break; case CRYPTO_BUF_CONTIG: - error = sglist_append(sg, crp->crp_buf, crp->crp_ilen); + error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len); break; default: error = EINVAL; @@ -495,7 +498,7 @@ } else { imm_len = 0; sglist_reset(sc->sg_ulptx); - error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, + error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); @@ -617,10 +620,14 @@ op_type = CHCR_ENCRYPT_OP; else op_type = CHCR_DECRYPT_OP; - + sglist_reset(sc->sg_dsgl); - error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, - crp->crp_payload_start, crp->crp_payload_length); + if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) + error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output, + crp->crp_payload_output_start, crp->crp_payload_length); + else + error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input, + crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); @@ -645,7 +652,7 @@ } else { imm_len = 0; sglist_reset(sc->sg_ulptx); - error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, + error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); @@ -844,13 +851,21 @@ iv_len + crp->crp_aad_length); if (error) return (error); - error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, - crp->crp_payload_start, crp->crp_payload_length); + if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) + error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output, + crp->crp_payload_output_start, crp->crp_payload_length); + else + error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input, + crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_ENCRYPT_OP) { - error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, - crp->crp_digest_start, hash_size_in_response); + if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) + error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output, + crp->crp_digest_start, hash_size_in_response); + else + error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input, + crp->crp_digest_start, hash_size_in_response); if (error) return (error); } @@ -903,17 +918,17 @@ imm_len = 0; sglist_reset(sc->sg_ulptx); if (crp->crp_aad_length != 0) { - error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, + error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, crp->crp_aad_start, crp->crp_aad_length); if (error) return (error); } - error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, + error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_DECRYPT_OP) { - error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, + error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, crp->crp_digest_start, hash_size_in_response); if (error) return (error); @@ -1153,13 +1168,21 @@ crp->crp_aad_length); if (error) return (error); - error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, - crp->crp_payload_start, crp->crp_payload_length); + if 
(CRYPTO_HAS_OUTPUT_BUFFER(crp)) + error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output, + crp->crp_payload_output_start, crp->crp_payload_length); + else + error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input, + crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_ENCRYPT_OP) { - error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, - crp->crp_digest_start, hash_size_in_response); + if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) + error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output, + crp->crp_digest_start, hash_size_in_response); + else + error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input, + crp->crp_digest_start, hash_size_in_response); if (error) return (error); } @@ -1199,17 +1222,17 @@ imm_len = 0; sglist_reset(sc->sg_ulptx); if (crp->crp_aad_length != 0) { - error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, + error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, crp->crp_aad_start, crp->crp_aad_length); if (error) return (error); } - error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, + error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_DECRYPT_OP) { - error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, + error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, crp->crp_digest_start, hash_size_in_response); if (error) return (error); @@ -1592,13 +1615,21 @@ aad_len); if (error) return (error); - error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, - crp->crp_payload_start, crp->crp_payload_length); + if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) + error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output, + crp->crp_payload_output_start, crp->crp_payload_length); + else + error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input, + crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_ENCRYPT_OP) { - error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, - crp->crp_digest_start, hash_size_in_response); + if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) + error = sglist_append_sglist(sc->sg_dsgl, sc->sg_output, + crp->crp_digest_start, hash_size_in_response); + else + error = sglist_append_sglist(sc->sg_dsgl, sc->sg_input, + crp->crp_digest_start, hash_size_in_response); if (error) return (error); } @@ -1640,17 +1671,17 @@ sglist_reset(sc->sg_ulptx); if (crp->crp_aad_length != 0) { - error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, + error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, crp->crp_aad_start, crp->crp_aad_length); if (error) return (error); } - error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, + error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_DECRYPT_OP) { - error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, + error = sglist_append_sglist(sc->sg_ulptx, sc->sg_input, crp->crp_digest_start, hash_size_in_response); if (error) return (error); @@ -2077,7 +2108,8 @@ sc->adapter->ccr_softc = sc; mtx_init(&sc->lock, "ccr", NULL, MTX_DEF); - sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK); + sc->sg_input = sglist_alloc(TX_SGL_SEGS, M_WAITOK); + sc->sg_output = sglist_alloc(TX_SGL_SEGS, M_WAITOK); sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK); sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK); sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK); @@ -2105,7 +2137,8 @@ free(sc->iv_aad_buf, M_CCR); sglist_free(sc->sg_dsgl); sglist_free(sc->sg_ulptx); - 
sglist_free(sc->sg_crp); + sglist_free(sc->sg_output); + sglist_free(sc->sg_input); sc->adapter->ccr_softc = NULL; return (0); } @@ -2291,7 +2324,7 @@ { unsigned int cipher_mode; - if (csp->csp_flags != 0) + if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT)) != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: @@ -2573,7 +2606,9 @@ sc = device_get_softc(dev); mtx_lock(&sc->lock); - error = ccr_populate_sglist(sc->sg_crp, crp); + error = ccr_populate_sglist(sc->sg_input, &crp->crp_buf); + if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp)) + error = ccr_populate_sglist(sc->sg_output, &crp->crp_obuf); if (error) { sc->stats_sglist_error++; goto out; Index: sys/dev/hifn/hifn7751.c =================================================================== --- sys/dev/hifn/hifn7751.c +++ sys/dev/hifn/hifn7751.c @@ -1781,22 +1781,6 @@ return (idx); } -static bus_size_t -hifn_crp_length(struct cryptop *crp) -{ - - switch (crp->crp_buf_type) { - case CRYPTO_BUF_MBUF: - return (crp->crp_mbuf->m_pkthdr.len); - case CRYPTO_BUF_UIO: - return (crp->crp_uio->uio_resid); - case CRYPTO_BUF_CONTIG: - return (crp->crp_ilen); - default: - panic("bad crp buffer type"); - } -} - static void hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, int error) { @@ -1852,12 +1836,12 @@ err = ENOMEM; goto err_srcmap1; } - cmd->src_mapsize = hifn_crp_length(crp); + cmd->src_mapsize = crypto_buffer_len(&crp->crp_buf); if (hifn_dmamap_aligned(&cmd->src)) { cmd->sloplen = cmd->src_mapsize & 3; cmd->dst = cmd->src; - } else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) { + } else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) { int totlen, len; struct mbuf *m, *m0, *mlast; @@ -1875,10 +1859,11 @@ * have no guarantee that we'll be re-entered. */ totlen = cmd->src_mapsize; - if (crp->crp_mbuf->m_flags & M_PKTHDR) { + if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m0, M_NOWAIT, MT_DATA); - if (m0 && !m_dup_pkthdr(m0, crp->crp_mbuf, M_NOWAIT)) { + if (m0 && !m_dup_pkthdr(m0, crp->crp_buf.cb_mbuf, + M_NOWAIT)) { m_free(m0); m0 = NULL; } @@ -2105,7 +2090,7 @@ if (cmd->src_map != cmd->dst_map) bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); err_srcmap: - if (crp->crp_buf_type == CRYPTO_BUF_MBUF) { + if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) { if (cmd->dst_m != NULL) m_freem(cmd->dst_m); } @@ -2684,7 +2669,7 @@ BUS_DMASYNC_POSTREAD); } - if (crp->crp_buf_type == CRYPTO_BUF_MBUF) { + if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) { if (cmd->dst_m != NULL) { totlen = cmd->src_mapsize; for (m = cmd->dst_m; m != NULL; m = m->m_next) { @@ -2694,9 +2679,10 @@ } else totlen -= m->m_len; } - cmd->dst_m->m_pkthdr.len = crp->crp_mbuf->m_pkthdr.len; - m_freem(crp->crp_mbuf); - crp->crp_mbuf = cmd->dst_m; + cmd->dst_m->m_pkthdr.len = + crp->crp_buf.cb_mbuf->m_pkthdr.len; + m_freem(crp->crp_buf.cb_mbuf); + crp->crp_buf.cb_mbuf = cmd->dst_m; } } Index: sys/dev/safe/safe.c =================================================================== --- sys/dev/safe/safe.c +++ sys/dev/safe/safe.c @@ -784,22 +784,6 @@ return (0); } -static bus_size_t -safe_crp_length(struct cryptop *crp) -{ - - switch (crp->crp_buf_type) { - case CRYPTO_BUF_MBUF: - return (crp->crp_mbuf->m_pkthdr.len); - case CRYPTO_BUF_UIO: - return (crp->crp_uio->uio_resid); - case CRYPTO_BUF_CONTIG: - return (crp->crp_ilen); - default: - panic("bad crp buffer type"); - } -} - static void safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error) { @@ -1040,7 +1024,7 @@ err = ENOMEM; goto errout; } - re->re_src_mapsize = safe_crp_length(crp); + 
re->re_src_mapsize = crypto_buffer_len(&crp->crp_buf); nicealign = safe_dmamap_aligned(&re->re_src); uniform = safe_dmamap_uniform(&re->re_src); @@ -1107,7 +1091,7 @@ err = ENOMEM; goto errout; } - } else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) { + } else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) { int totlen, len; struct mbuf *m, *top, **mp; @@ -1124,10 +1108,10 @@ if (!uniform) safestats.st_notuniform++; totlen = re->re_src_mapsize; - if (crp->crp_mbuf->m_flags & M_PKTHDR) { + if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m, M_NOWAIT, MT_DATA); - if (m && !m_dup_pkthdr(m, crp->crp_mbuf, + if (m && !m_dup_pkthdr(m, crp->crp_buf.cb_mbuf, M_NOWAIT)) { m_free(m); m = NULL; @@ -1212,8 +1196,8 @@ if (!(csp->csp_mode == CSP_MODE_ETA && (re->re_src.mapsize-oplen) == ses->ses_mlen && crp->crp_digest_start == oplen)) - safe_mcopy(crp->crp_mbuf, re->re_dst_m, - oplen); + safe_mcopy(crp->crp_buf.cb_mbuf, + re->re_dst_m, oplen); else safestats.st_noicvcopy++; } @@ -1349,7 +1333,10 @@ crp->crp_etype = EIO; /* something more meaningful? */ } - /* XXX: Should crp_mbuf be updated to re->re_dst_m if it is non-NULL? */ + /* + * XXX: Should crp_buf.cb_mbuf be updated to re->re_dst_m if + * it is non-NULL? + */ if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) { bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map, Index: sys/dev/ubsec/ubsec.c =================================================================== --- sys/dev/ubsec/ubsec.c +++ sys/dev/ubsec/ubsec.c @@ -961,22 +961,6 @@ return (0); } -static bus_size_t -ubsec_crp_length(struct cryptop *crp) -{ - - switch (crp->crp_buf_type) { - case CRYPTO_BUF_MBUF: - return (crp->crp_mbuf->m_pkthdr.len); - case CRYPTO_BUF_UIO: - return (crp->crp_uio->uio_resid); - case CRYPTO_BUF_CONTIG: - return (crp->crp_ilen); - default: - panic("bad crp buffer type"); - } -} - static void ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error) { @@ -1137,7 +1121,7 @@ err = ENOMEM; goto errout; } - q->q_src_mapsize = ubsec_crp_length(crp); + q->q_src_mapsize = crypto_buffer_len(&crp->crp_buf); nicealign = ubsec_dmamap_aligned(&q->q_src); dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend); @@ -1206,7 +1190,7 @@ } else { if (nicealign) { q->q_dst = q->q_src; - } else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) { + } else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) { int totlen, len; struct mbuf *m, *top, **mp; @@ -1214,17 +1198,17 @@ totlen = q->q_src_mapsize; if (totlen >= MINCLSIZE) { m = m_getcl(M_NOWAIT, MT_DATA, - crp->crp_mbuf->m_flags & M_PKTHDR); + crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR); len = MCLBYTES; - } else if (crp->crp_mbuf->m_flags & M_PKTHDR) { + } else if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) { m = m_gethdr(M_NOWAIT, MT_DATA); len = MHLEN; } else { m = m_get(M_NOWAIT, MT_DATA); len = MLEN; } - if (m && crp->crp_mbuf->m_flags & M_PKTHDR && - !m_dup_pkthdr(m, crp->crp_mbuf, M_NOWAIT)) { + if (m && crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR && + !m_dup_pkthdr(m, crp->crp_buf.cb_mbuf, M_NOWAIT)) { m_free(m); m = NULL; } @@ -1258,7 +1242,8 @@ mp = &m->m_next; } q->q_dst_m = top; - ubsec_mcopy(crp->crp_mbuf, q->q_dst_m, cpskip, cpoffset); + ubsec_mcopy(crp->crp_buf.cb_mbuf, q->q_dst_m, cpskip, + cpoffset); if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_dst_map) != 0) { ubsecstats.hst_nomap++; @@ -1429,8 +1414,8 @@ bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); if (q->q_dst_m != NULL) { - m_freem(crp->crp_mbuf); - crp->crp_mbuf = q->q_dst_m; + m_freem(crp->crp_buf.cb_mbuf); + crp->crp_buf.cb_mbuf = 
q->q_dst_m; } if (csp->csp_auth_alg != 0) { Index: sys/geom/eli/g_eli_crypto.c =================================================================== --- sys/geom/eli/g_eli_crypto.c +++ sys/geom/eli/g_eli_crypto.c @@ -92,9 +92,7 @@ crp->crp_opaque = NULL; crp->crp_callback = g_eli_crypto_done; - crp->crp_buf_type = CRYPTO_BUF_CONTIG; - crp->crp_ilen = datasize; - crp->crp_buf = (void *)data; + crypto_use_buf(crp, data, datasize); error = crypto_dispatch(crp); if (error == 0) { Index: sys/geom/eli/g_eli_integrity.c =================================================================== --- sys/geom/eli/g_eli_integrity.c +++ sys/geom/eli/g_eli_integrity.c @@ -159,7 +159,7 @@ /* Number of sectors from encrypted provider, eg. 18. */ nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; /* Which relative sector this request decrypted. */ - rel_sec = ((crp->crp_buf + crp->crp_payload_start) - + rel_sec = ((crp->crp_buf.cb_buf + crp->crp_payload_start) - (char *)bp->bio_driver2) / encr_secsize; errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec + @@ -517,10 +517,8 @@ plaindata += data_secsize; } - crp->crp_ilen = sc->sc_alen + data_secsize; + crypto_use_buf(crp, data, sc->sc_alen + data_secsize); crp->crp_opaque = (void *)bp; - crp->crp_buf_type = CRYPTO_BUF_CONTIG; - crp->crp_buf = (void *)data; data += encr_secsize; crp->crp_flags = CRYPTO_F_CBIFSYNC; if (g_eli_batch) Index: sys/geom/eli/g_eli_privacy.c =================================================================== --- sys/geom/eli/g_eli_privacy.c +++ sys/geom/eli/g_eli_privacy.c @@ -82,7 +82,7 @@ if (crp->crp_etype == 0) { G_ELI_DEBUG(3, "Crypto READ request done (%d/%d).", bp->bio_inbed, bp->bio_children); - bp->bio_completed += crp->crp_ilen; + bp->bio_completed += crp->crp_payload_length; } else { G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.", bp->bio_inbed, bp->bio_children, crp->crp_etype); @@ -265,10 +265,8 @@ for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) { crp = crypto_getreq(wr->w_sid, M_WAITOK); - crp->crp_ilen = secsize; + crypto_use_buf(crp, data, secsize); crp->crp_opaque = (void *)bp; - crp->crp_buf_type = CRYPTO_BUF_CONTIG; - crp->crp_buf = (void *)data; data += secsize; if (bp->bio_cmd == BIO_WRITE) { crp->crp_op = CRYPTO_OP_ENCRYPT; Index: sys/kern/subr_bus_dma.c =================================================================== --- sys/kern/subr_bus_dma.c +++ sys/kern/subr_bus_dma.c @@ -639,8 +639,9 @@ } int -bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp, - bus_dmamap_callback_t *callback, void *callback_arg, int flags) +bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, + struct crypto_buffer *cb, bus_dmamap_callback_t *callback, + void *callback_arg, int flags) { bus_dma_segment_t *segs; int error; @@ -649,19 +650,21 @@ flags |= BUS_DMA_NOWAIT; nsegs = -1; error = 0; - switch (crp->crp_buf_type) { + switch (cb->cb_type) { case CRYPTO_BUF_CONTIG: - error = _bus_dmamap_load_buffer(dmat, map, crp->crp_buf, - crp->crp_ilen, kernel_pmap, flags, NULL, &nsegs); + error = _bus_dmamap_load_buffer(dmat, map, cb->cb_buf, + cb->cb_buf_len, kernel_pmap, flags, NULL, &nsegs); break; case CRYPTO_BUF_MBUF: - error = _bus_dmamap_load_mbuf_sg(dmat, map, crp->crp_mbuf, + error = _bus_dmamap_load_mbuf_sg(dmat, map, cb->cb_mbuf, NULL, &nsegs, flags); break; case CRYPTO_BUF_UIO: - error = _bus_dmamap_load_uio(dmat, map, crp->crp_uio, &nsegs, + error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs, flags); break; + default: + error = EINVAL; } 
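	/*
	 * (The three loaders above leave 'nsegs' as the index of the
	 * last segment, hence the increment that follows.  A driver
	 * with a separate output buffer can map it with a second call;
	 * hedged sketch using a hypothetical 'dst_map':
	 *
	 *	error = bus_dmamap_load_crp_buffer(dmat, dst_map,
	 *	    &crp->crp_obuf, callback, callback_arg, flags);
	 * )
	 */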
nsegs++; @@ -686,3 +689,11 @@ return (0); } + +int +bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp, + bus_dmamap_callback_t *callback, void *callback_arg, int flags) +{ + return (bus_dmamap_load_crp_buffer(dmat, map, &crp->crp_buf, callback, + callback_arg, flags)); +} Index: sys/kgssapi/krb5/kcrypto_aes.c =================================================================== --- sys/kgssapi/krb5/kcrypto_aes.c +++ sys/kgssapi/krb5/kcrypto_aes.c @@ -156,9 +156,10 @@ memset(crp->crp_iv, 0, 16); } - crp->crp_buf_type = buftype; - crp->crp_buf = buf; - crp->crp_ilen = skip + len; + if (buftype == CRYPTO_BUF_MBUF) + crypto_use_mbuf(crp, buf); + else + crypto_use_buf(crp, buf, skip + len); crp->crp_opaque = as; crp->crp_callback = aes_crypto_cb; @@ -328,9 +329,7 @@ crp->crp_payload_length = inlen; crp->crp_digest_start = skip + inlen; crp->crp_flags = CRYPTO_F_CBIFSYNC; - crp->crp_buf_type = CRYPTO_BUF_MBUF; - crp->crp_mbuf = inout; - crp->crp_ilen = skip + inlen + 12; + crypto_use_mbuf(crp, inout); crp->crp_opaque = as; crp->crp_callback = aes_crypto_cb; Index: sys/netipsec/xform_ah.c =================================================================== --- sys/netipsec/xform_ah.c +++ sys/netipsec/xform_ah.c @@ -676,13 +676,11 @@ } /* Crypto operation descriptor. */ - crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */ crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST; crp->crp_flags = CRYPTO_F_CBIFSYNC; if (V_async_crypto) crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; - crp->crp_mbuf = m; - crp->crp_buf_type = CRYPTO_BUF_MBUF; + crypto_use_mbuf(crp, m); crp->crp_callback = ah_input_cb; crp->crp_opaque = xd; @@ -717,7 +715,7 @@ int authsize, rplen, ahsize, error, skip, protoff; uint8_t nxt; - m = crp->crp_mbuf; + m = crp->crp_buf.cb_mbuf; xd = crp->crp_opaque; CURVNET_SET(xd->vnet); sav = xd->sav; @@ -1053,13 +1051,11 @@ } /* Crypto operation descriptor. */ - crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */ crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST; crp->crp_flags = CRYPTO_F_CBIFSYNC; if (V_async_crypto) crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; - crp->crp_mbuf = m; - crp->crp_buf_type = CRYPTO_BUF_MBUF; + crypto_use_mbuf(crp, m); crp->crp_callback = ah_output_cb; crp->crp_opaque = xd; @@ -1095,7 +1091,7 @@ u_int idx; int skip, error; - m = (struct mbuf *) crp->crp_buf; + m = crp->crp_buf.cb_mbuf; xd = (struct xform_data *) crp->crp_opaque; CURVNET_SET(xd->vnet); sp = xd->sp; Index: sys/netipsec/xform_esp.c =================================================================== --- sys/netipsec/xform_esp.c +++ sys/netipsec/xform_esp.c @@ -389,12 +389,10 @@ } /* Crypto operation descriptor */ - crp->crp_ilen = m->m_pkthdr.len; /* Total input length */ crp->crp_flags = CRYPTO_F_CBIFSYNC; if (V_async_crypto) crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; - crp->crp_mbuf = m; - crp->crp_buf_type = CRYPTO_BUF_MBUF; + crypto_use_mbuf(crp, m); crp->crp_callback = esp_input_cb; crp->crp_opaque = xd; @@ -469,7 +467,7 @@ crypto_session_t cryptoid; int hlen, skip, protoff, error, alen; - m = crp->crp_mbuf; + m = crp->crp_buf.cb_mbuf; xd = crp->crp_opaque; CURVNET_SET(xd->vnet); sav = xd->sav; @@ -863,12 +861,10 @@ xd->vnet = curvnet; /* Crypto operation descriptor. */ - crp->crp_ilen = m->m_pkthdr.len; /* Total input length. 
*/ crp->crp_flags |= CRYPTO_F_CBIFSYNC; if (V_async_crypto) crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; - crp->crp_mbuf = m; - crp->crp_buf_type = CRYPTO_BUF_MBUF; + crypto_use_mbuf(crp, m); crp->crp_callback = esp_output_cb; crp->crp_opaque = xd; @@ -907,7 +903,7 @@ xd = (struct xform_data *) crp->crp_opaque; CURVNET_SET(xd->vnet); - m = (struct mbuf *) crp->crp_buf; + m = crp->crp_buf.cb_mbuf; sp = xd->sp; sav = xd->sav; idx = xd->idx; Index: sys/netipsec/xform_ipcomp.c =================================================================== --- sys/netipsec/xform_ipcomp.c +++ sys/netipsec/xform_ipcomp.c @@ -249,10 +249,8 @@ crp->crp_payload_length = m->m_pkthdr.len - (skip + hlen); /* Crypto operation descriptor */ - crp->crp_ilen = m->m_pkthdr.len - (skip + hlen); crp->crp_flags = CRYPTO_F_CBIFSYNC; - crp->crp_mbuf = m; - crp->crp_buf_type = CRYPTO_BUF_MBUF; + crypto_use_mbuf(crp, m); crp->crp_callback = ipcomp_input_cb; crp->crp_opaque = xd; @@ -291,7 +289,7 @@ int skip, protoff; uint8_t nproto; - m = crp->crp_mbuf; + m = crp->crp_buf.cb_mbuf; xd = crp->crp_opaque; CURVNET_SET(xd->vnet); sav = xd->sav; @@ -506,10 +504,8 @@ xd->cryptoid = cryptoid; /* Crypto operation descriptor */ - crp->crp_ilen = m->m_pkthdr.len; /* Total input length */ crp->crp_flags = CRYPTO_F_CBIFSYNC; - crp->crp_mbuf = m; - crp->crp_buf_type = CRYPTO_BUF_MBUF; + crypto_use_mbuf(crp, m); crp->crp_callback = ipcomp_output_cb; crp->crp_opaque = xd; @@ -537,7 +533,7 @@ u_int idx; int error, skip, protoff; - m = crp->crp_mbuf; + m = crp->crp_buf.cb_mbuf; xd = crp->crp_opaque; CURVNET_SET(xd->vnet); idx = xd->idx; Index: sys/opencrypto/criov.c =================================================================== --- sys/opencrypto/criov.c +++ sys/opencrypto/criov.c @@ -60,7 +60,7 @@ } \ } while (0) -void +static void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp) { struct iovec *iov = uio->uio_iov; @@ -80,7 +80,7 @@ } } -void +static void cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp) { struct iovec *iov = uio->uio_iov; @@ -103,7 +103,7 @@ /* * Return the index and offset of location in iovec list. 
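 *
 * (Drivers that previously open-coded this kind of iteration can use
 * the crypto_buffer_cursor API added below; a minimal sketch, assuming
 * a request 'crp' whose payload is copied into a flat buffer 'dst':
 *
 *	struct crypto_buffer_cursor cc;
 *
 *	crypto_cursor_init(&cc, &crp->crp_buf);
 *	crypto_cursor_advance(&cc, crp->crp_payload_start);
 *	crypto_cursor_copydata(&cc, crp->crp_payload_length, dst);
 *
 * mirroring the cryptosoft conversion later in this patch.)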
 */
-int
+static int
 cuio_getptr(struct uio *uio, int loc, int *off)
 {
 	int ind, len;
@@ -128,11 +128,259 @@
 	return (-1);
 }
 
+void
+crypto_cursor_init(struct crypto_buffer_cursor *cc,
+    const struct crypto_buffer *cb)
+{
+	memset(cc, 0, sizeof(*cc));
+	cc->cc_type = cb->cb_type;
+	switch (cc->cc_type) {
+	case CRYPTO_BUF_CONTIG:
+		cc->cc_buf = cb->cb_buf;
+		cc->cc_buf_len = cb->cb_buf_len;
+		break;
+	case CRYPTO_BUF_MBUF:
+		cc->cc_mbuf = cb->cb_mbuf;
+		break;
+	case CRYPTO_BUF_UIO:
+		cc->cc_iov = cb->cb_uio->uio_iov;
+		break;
+	default:
+#ifdef INVARIANTS
+		panic("%s: invalid buffer type %d", __func__, cb->cb_type);
+#endif
+		break;
+	}
+}
+
+void
+crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount)
+{
+	size_t remain;
+
+	switch (cc->cc_type) {
+	case CRYPTO_BUF_CONTIG:
+		MPASS(cc->cc_buf_len >= amount);
+		cc->cc_buf += amount;
+		cc->cc_buf_len -= amount;
+		break;
+	case CRYPTO_BUF_MBUF:
+		for (;;) {
+			remain = cc->cc_mbuf->m_len - cc->cc_offset;
+			if (amount < remain) {
+				cc->cc_offset += amount;
+				break;
+			}
+			amount -= remain;
+			cc->cc_mbuf = cc->cc_mbuf->m_next;
+			cc->cc_offset = 0;
+			if (amount == 0)
+				break;
+		}
+		break;
+	case CRYPTO_BUF_UIO:
+		for (;;) {
+			remain = cc->cc_iov->iov_len - cc->cc_offset;
+			if (amount < remain) {
+				cc->cc_offset += amount;
+				break;
+			}
+			amount -= remain;
+			cc->cc_iov++;
+			cc->cc_offset = 0;
+			if (amount == 0)
+				break;
+		}
+		break;
+	default:
+#ifdef INVARIANTS
+		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
+#endif
+		break;
+	}
+}
+
+void *
+crypto_cursor_segbase(struct crypto_buffer_cursor *cc)
+{
+	switch (cc->cc_type) {
+	case CRYPTO_BUF_CONTIG:
+		return (cc->cc_buf);
+	case CRYPTO_BUF_MBUF:
+		KASSERT((cc->cc_mbuf->m_flags & M_NOMAP) == 0,
+		    ("%s: not supported for unmapped mbufs", __func__));
+		return (mtod(cc->cc_mbuf, char *) + cc->cc_offset);
+	case CRYPTO_BUF_UIO:
+		return ((char *)cc->cc_iov->iov_base + cc->cc_offset);
+	default:
+#ifdef INVARIANTS
+		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
+#endif
+		return (NULL);
+	}
+}
+
+size_t
+crypto_cursor_seglen(struct crypto_buffer_cursor *cc)
+{
+	switch (cc->cc_type) {
+	case CRYPTO_BUF_CONTIG:
+		return (cc->cc_buf_len);
+	case CRYPTO_BUF_MBUF:
+		return (cc->cc_mbuf->m_len - cc->cc_offset);
+	case CRYPTO_BUF_UIO:
+		return (cc->cc_iov->iov_len - cc->cc_offset);
+	default:
+#ifdef INVARIANTS
+		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
+#endif
+		return (0);
+	}
+}
+
+void
+crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size,
+    const void *vsrc)
+{
+	size_t remain, todo;
+	const char *src;
+	char *dst;
+
+	src = vsrc;
+	switch (cc->cc_type) {
+	case CRYPTO_BUF_CONTIG:
+		MPASS(cc->cc_buf_len >= size);
+		memcpy(cc->cc_buf, src, size);
+		cc->cc_buf += size;
+		cc->cc_buf_len -= size;
+		break;
+	case CRYPTO_BUF_MBUF:
+		for (;;) {
+			KASSERT((cc->cc_mbuf->m_flags & M_NOMAP) == 0,
+			    ("%s: not supported for unmapped mbufs", __func__));
+			dst = mtod(cc->cc_mbuf, char *) + cc->cc_offset;
+			remain = cc->cc_mbuf->m_len - cc->cc_offset;
+			todo = MIN(remain, size);
+			memcpy(dst, src, todo);
+			src += todo;
+			if (todo < remain) {
+				cc->cc_offset += todo;
+				break;
+			}
+			size -= todo;
+			cc->cc_mbuf = cc->cc_mbuf->m_next;
+			cc->cc_offset = 0;
+			if (size == 0)
+				break;
+		}
+		break;
+	case CRYPTO_BUF_UIO:
+		for (;;) {
+			dst = (char *)cc->cc_iov->iov_base + cc->cc_offset;
+			remain = cc->cc_iov->iov_len - cc->cc_offset;
+			todo = MIN(remain, size);
+			memcpy(dst, src, todo);
+			src += todo;
+			if (todo < remain) {
+				cc->cc_offset += todo;
+				break;
+			}
+			size -= todo;
+			cc->cc_iov++;
+			cc->cc_offset = 0;
+			if (size == 0)
+				break;
+		}
+		break;
+	default:
+#ifdef INVARIANTS
+		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
+#endif
+		break;
+	}
+}
+
+void
+crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, void *vdst)
+{
+	size_t remain, todo;
+	const char *src;
+	char *dst;
+
+	dst = vdst;
+	switch (cc->cc_type) {
+	case CRYPTO_BUF_CONTIG:
+		MPASS(cc->cc_buf_len >= size);
+		memcpy(dst, cc->cc_buf, size);
+		cc->cc_buf += size;
+		cc->cc_buf_len -= size;
+		break;
+	case CRYPTO_BUF_MBUF:
+		for (;;) {
+			KASSERT((cc->cc_mbuf->m_flags & M_NOMAP) == 0,
+			    ("%s: not supported for unmapped mbufs", __func__));
+			src = mtod(cc->cc_mbuf, const char *) + cc->cc_offset;
+			remain = cc->cc_mbuf->m_len - cc->cc_offset;
+			todo = MIN(remain, size);
+			memcpy(dst, src, todo);
+			dst += todo;
+			if (todo < remain) {
+				cc->cc_offset += todo;
+				break;
+			}
+			size -= todo;
+			cc->cc_mbuf = cc->cc_mbuf->m_next;
+			cc->cc_offset = 0;
+			if (size == 0)
+				break;
+		}
+		break;
+	case CRYPTO_BUF_UIO:
+		for (;;) {
+			src = (const char *)cc->cc_iov->iov_base +
+			    cc->cc_offset;
+			remain = cc->cc_iov->iov_len - cc->cc_offset;
+			todo = MIN(remain, size);
+			memcpy(dst, src, todo);
+			dst += todo;
+			if (todo < remain) {
+				cc->cc_offset += todo;
+				break;
+			}
+			size -= todo;
+			cc->cc_iov++;
+			cc->cc_offset = 0;
+			if (size == 0)
+				break;
+		}
+		break;
+	default:
+#ifdef INVARIANTS
+		panic("%s: invalid buffer type %d", __func__, cc->cc_type);
#endif
+		break;
+	}
+}
+
+/*
+ * To avoid advancing 'cursor', make a local copy that gets advanced
+ * instead.
+ */
+void
+crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size,
+    void *vdst)
+{
+	struct crypto_buffer_cursor copy;
+
+	copy = *cc;
+	crypto_cursor_copydata(&copy, size, vdst);
+}
+
 /*
  * Apply function f to the data in an iovec list starting "off" bytes from
  * the beginning, continuing for "len" bytes.
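  *
  * (Now file-static; consumers go through crypto_apply_buf(), which the
  * aesni change above uses to hash a separate output buffer:
  *
  *	crypto_apply_buf(&crp->crp_obuf, crp->crp_payload_output_start,
  *	    crp->crp_payload_length, hash_update, &sctx);
  *
  * 'hash_update' and 'sctx' stand in for the session's update callback
  * and its context.)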
*/ -int +static int cuio_apply(struct uio *uio, int off, int len, int (*f)(void *, void *, u_int), void *arg) { @@ -159,19 +407,28 @@ void crypto_copyback(struct cryptop *crp, int off, int size, const void *src) { + struct crypto_buffer *cb; - switch (crp->crp_buf_type) { + if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) + cb = &crp->crp_obuf; + else + cb = &crp->crp_buf; + switch (cb->cb_type) { case CRYPTO_BUF_MBUF: - m_copyback(crp->crp_mbuf, off, size, src); + m_copyback(cb->cb_mbuf, off, size, src); break; case CRYPTO_BUF_UIO: - cuio_copyback(crp->crp_uio, off, size, src); + cuio_copyback(cb->cb_uio, off, size, src); break; case CRYPTO_BUF_CONTIG: - bcopy(src, crp->crp_buf + off, size); + MPASS(off + size <= cb->cb_buf_len); + bcopy(src, cb->cb_buf + off, size); break; default: - panic("invalid crp buf type %d", crp->crp_buf_type); +#ifdef INVARIANTS + panic("invalid crp buf type %d", cb->cb_type); +#endif + break; } } @@ -179,88 +436,57 @@ crypto_copydata(struct cryptop *crp, int off, int size, void *dst) { - switch (crp->crp_buf_type) { + switch (crp->crp_buf.cb_type) { case CRYPTO_BUF_MBUF: - m_copydata(crp->crp_mbuf, off, size, dst); + m_copydata(crp->crp_buf.cb_mbuf, off, size, dst); break; case CRYPTO_BUF_UIO: - cuio_copydata(crp->crp_uio, off, size, dst); + cuio_copydata(crp->crp_buf.cb_uio, off, size, dst); break; case CRYPTO_BUF_CONTIG: - bcopy(crp->crp_buf + off, dst, size); + MPASS(off + size <= crp->crp_buf.cb_buf_len); + bcopy(crp->crp_buf.cb_buf + off, dst, size); break; default: - panic("invalid crp buf type %d", crp->crp_buf_type); +#ifdef INVARIANTS + panic("invalid crp buf type %d", crp->crp_buf.cb_type); +#endif + break; } } int -crypto_apply(struct cryptop *crp, int off, int len, +crypto_apply_buf(struct crypto_buffer *cb, int off, int len, int (*f)(void *, void *, u_int), void *arg) { int error; - switch (crp->crp_buf_type) { + switch (cb->cb_type) { case CRYPTO_BUF_MBUF: - error = m_apply(crp->crp_mbuf, off, len, f, arg); + error = m_apply(cb->cb_mbuf, off, len, f, arg); break; case CRYPTO_BUF_UIO: - error = cuio_apply(crp->crp_uio, off, len, f, arg); + error = cuio_apply(cb->cb_uio, off, len, f, arg); break; case CRYPTO_BUF_CONTIG: - error = (*f)(arg, crp->crp_buf + off, len); + MPASS(off + len <= cb->cb_buf_len); + error = (*f)(arg, cb->cb_buf + off, len); break; default: - panic("invalid crp buf type %d", crp->crp_buf_type); +#ifdef INVARIANTS + panic("invalid crypto buf type %d", cb->cb_type); +#endif + error = 0; + break; } return (error); } int -crypto_mbuftoiov(struct mbuf *mbuf, struct iovec **iovptr, int *cnt, - int *allocated) +crypto_apply(struct cryptop *crp, int off, int len, + int (*f)(void *, void *, u_int), void *arg) { - struct iovec *iov; - struct mbuf *m, *mtmp; - int i, j; - - *allocated = 0; - iov = *iovptr; - if (iov == NULL) - *cnt = 0; - - m = mbuf; - i = 0; - while (m != NULL) { - if (i == *cnt) { - /* we need to allocate a larger array */ - j = 1; - mtmp = m; - while ((mtmp = mtmp->m_next) != NULL) - j++; - iov = malloc(sizeof *iov * (i + j), M_CRYPTO_DATA, - M_NOWAIT); - if (iov == NULL) - return ENOMEM; - *allocated = 1; - *cnt = i + j; - memcpy(iov, *iovptr, sizeof *iov * i); - } - - iov[i].iov_base = m->m_data; - iov[i].iov_len = m->m_len; - - i++; - m = m->m_next; - } - - if (*allocated) - KASSERT(*cnt == i, ("did not allocate correct amount: %d != %d", - *cnt, i)); - - *iovptr = iov; - *cnt = i; - return 0; + return (crypto_apply_buf(&crp->crp_buf, off, len, f, arg)); } static inline void * @@ -299,18 +525,29 @@ return ((char 
*)uio->uio_iov[idx].iov_base + skip); } +void * +crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, size_t skip, + size_t len) +{ + + switch (cb->cb_type) { + case CRYPTO_BUF_MBUF: + return (m_contiguous_subsegment(cb->cb_mbuf, skip, len)); + case CRYPTO_BUF_UIO: + return (cuio_contiguous_segment(cb->cb_uio, skip, len)); + case CRYPTO_BUF_CONTIG: + MPASS(skip + len <= cb->cb_buf_len); + return (cb->cb_buf + skip); + default: +#ifdef INVARIANTS + panic("invalid crp buf type %d", cb->cb_type); +#endif + return (NULL); + } +} + void * crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len) { - - switch (crp->crp_buf_type) { - case CRYPTO_BUF_MBUF: - return (m_contiguous_subsegment(crp->crp_mbuf, skip, len)); - case CRYPTO_BUF_UIO: - return (cuio_contiguous_segment(crp->crp_uio, skip, len)); - case CRYPTO_BUF_CONTIG: - return (crp->crp_buf + skip); - default: - panic("invalid crp buf type %d", crp->crp_buf_type); - } + return (crypto_buffer_contiguous_subsegment(&crp->crp_buf, skip, len)); } Index: sys/opencrypto/crypto.c =================================================================== --- sys/opencrypto/crypto.c +++ sys/opencrypto/crypto.c @@ -69,12 +69,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include @@ -150,7 +152,7 @@ #define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx) #define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx) -static SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0, +SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0, "In-kernel cryptography"); /* @@ -772,7 +774,7 @@ struct auth_hash *axf; /* Mode-independent checks. */ - if (csp->csp_flags != 0) + if ((csp->csp_flags & ~CSP_F_SEPARATE_OUTPUT) != 0) return (false); if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 || csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0) @@ -786,7 +788,7 @@ case CSP_MODE_COMPRESS: if (!alg_is_compression(csp->csp_cipher_alg)) return (false); - if (csp->csp_flags != 0) + if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) return (false); if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 || csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || @@ -1229,20 +1231,66 @@ return err; } +size_t +crypto_buffer_len(struct crypto_buffer *cb) +{ + switch (cb->cb_type) { + case CRYPTO_BUF_CONTIG: + return (cb->cb_buf_len); + case CRYPTO_BUF_MBUF: + if (cb->cb_mbuf->m_flags & M_PKTHDR) + return (cb->cb_mbuf->m_pkthdr.len); + return (m_length(cb->cb_mbuf, NULL)); + case CRYPTO_BUF_UIO: + return (cb->cb_uio->uio_resid); + default: + return (0); + } +} + #ifdef INVARIANTS /* Various sanity checks on crypto requests. 
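 * The input buffer is always checked; the output buffer is checked
 * only when the session was created with CSP_F_SEPARATE_OUTPUT and
 * the request actually supplies one.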
*/ +static void +cb_sanity(struct crypto_buffer *cb, const char *name) +{ + KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type < CRYPTO_BUF_LAST, + ("incoming crp with invalid %s buffer type", name)); + if (cb->cb_type == CRYPTO_BUF_CONTIG) + KASSERT(cb->cb_buf_len >= 0, + ("incoming crp with -ve %s buffer length", name)); +} + static void crp_sanity(struct cryptop *crp) { struct crypto_session_params *csp; + struct crypto_buffer *out; + size_t ilen, len, olen; KASSERT(crp->crp_session != NULL, ("incoming crp without a session")); - KASSERT(crp->crp_ilen >= 0, ("incoming crp with -ve input length")); + KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE && + crp->crp_obuf.cb_type < CRYPTO_BUF_LAST, + ("incoming crp with invalid output buffer type")); KASSERT(crp->crp_etype == 0, ("incoming crp with error")); KASSERT(!(crp->crp_flags & CRYPTO_F_DONE), ("incoming crp already done")); csp = &crp->crp_session->csp; + cb_sanity(&crp->crp_buf, "input"); + ilen = crypto_buffer_len(&crp->crp_buf); + olen = ilen; + out = NULL; + if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) { + if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) { + cb_sanity(&crp->crp_obuf, "output"); + out = &crp->crp_obuf; + olen = crypto_buffer_len(out); + } + } else + KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE, + ("incoming crp with separate output buffer " + "but no session support")); + switch (csp->csp_mode) { case CSP_MODE_COMPRESS: KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS || @@ -1280,17 +1328,14 @@ ("invalid ETA op %x", crp->crp_op)); break; } - KASSERT(crp->crp_buf_type >= CRYPTO_BUF_CONTIG && - crp->crp_buf_type <= CRYPTO_BUF_MBUF, - ("invalid crp buffer type %d", crp->crp_buf_type)); if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { KASSERT(crp->crp_aad_start == 0 || - crp->crp_aad_start < crp->crp_ilen, + crp->crp_aad_start < ilen, ("invalid AAD start")); KASSERT(crp->crp_aad_length != 0 || crp->crp_aad_start == 0, ("AAD with zero length and non-zero start")); KASSERT(crp->crp_aad_length == 0 || - crp->crp_aad_start + crp->crp_aad_length <= crp->crp_ilen, + crp->crp_aad_start + crp->crp_aad_length <= ilen, ("AAD outside input length")); } else { KASSERT(crp->crp_aad_start == 0 && crp->crp_aad_length == 0, @@ -1305,25 +1350,39 @@ KASSERT(crp->crp_iv_start == 0, ("IV_SEPARATE used with non-zero IV start")); } else { - KASSERT(crp->crp_iv_start < crp->crp_ilen, + KASSERT(crp->crp_iv_start < ilen, ("invalid IV start")); - KASSERT(crp->crp_iv_start + csp->csp_ivlen <= crp->crp_ilen, - ("IV outside input length")); + KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen, + ("IV outside buffer length")); } + /* XXX: payload_start of 0 should always be < ilen? 
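+	 * (A request with crp_payload_length == 0 legitimately uses
+	 * crp_payload_start == 0, which is why the first clause of the
+	 * KASSERT below permits it.)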
*/ KASSERT(crp->crp_payload_start == 0 || - crp->crp_payload_start < crp->crp_ilen, + crp->crp_payload_start < ilen, ("invalid payload start")); KASSERT(crp->crp_payload_start + crp->crp_payload_length <= - crp->crp_ilen, ("payload outside input length")); + ilen, ("payload outside input buffer")); + if (out == NULL) { + KASSERT(crp->crp_payload_output_start == 0, + ("payload output start non-zero without output buffer")); + } else { + KASSERT(crp->crp_payload_output_start < olen, + ("invalid payload output start")); + KASSERT(crp->crp_payload_output_start + + crp->crp_payload_length <= olen, + ("payload outside output buffer")); + } if (csp->csp_mode == CSP_MODE_DIGEST || csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { + if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) + len = ilen; + else + len = olen; KASSERT(crp->crp_digest_start == 0 || - crp->crp_digest_start < crp->crp_ilen, + crp->crp_digest_start < len, ("invalid digest start")); /* XXX: For the mlen == 0 case this check isn't perfect. */ - KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= - crp->crp_ilen, - ("digest outside input length")); + KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len, + ("digest outside buffer")); } else { KASSERT(crp->crp_digest_start == 0, ("non-zero digest start for request without a digest")); @@ -2166,10 +2225,10 @@ "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", "Device", "Callback"); TAILQ_FOREACH(crp, &crp_q, crp_next) { - db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n" + db_printf("%4u %08x %4u %4u %04x %8p %8p\n" , crp->crp_session->cap->cc_hid , (int) crypto_ses2caps(crp->crp_session) - , crp->crp_ilen, crp->crp_olen + , crp->crp_olen , crp->crp_etype , crp->crp_flags , device_get_nameunit(crp->crp_session->cap->cc_dev) Index: sys/opencrypto/cryptodev.h =================================================================== --- sys/opencrypto/cryptodev.h +++ sys/opencrypto/cryptodev.h @@ -406,7 +406,9 @@ int csp_flags; - int csp_ivlen; /* IV length in bytes. */ +#define CSP_F_SEPARATE_OUTPUT 0x0001 /* Requests can use separate output */ + + int csp_ivlen; /* IV length in bytes. */ int csp_cipher_alg; int csp_cipher_klen; /* Key length in bytes. */ @@ -419,6 +421,47 @@ 0 means all. */ }; +enum crypto_buffer_type { + CRYPTO_BUF_NONE = 0, + CRYPTO_BUF_CONTIG, + CRYPTO_BUF_UIO, + CRYPTO_BUF_MBUF, + CRYPTO_BUF_LAST = CRYPTO_BUF_MBUF +}; + +/* + * Description of a data buffer for a request. Requests can either + * have a single buffer that is modified in place or separate input + * and output buffers. + */ +struct crypto_buffer { + union { + struct { + char *cb_buf; + int cb_buf_len; + }; + struct mbuf *cb_mbuf; + struct uio *cb_uio; + }; + enum crypto_buffer_type cb_type; +}; + +/* + * A cursor is used to iterate through a crypto request data buffer. + */ +struct crypto_buffer_cursor { + union { + char *cc_buf; + struct mbuf *cc_mbuf; + struct iovec *cc_iov; + }; + union { + int cc_buf_len; + size_t cc_offset; + }; + enum crypto_buffer_type cc_type; +}; + /* Structure describing complete operation */ struct cryptop { TAILQ_ENTRY(cryptop) crp_next; @@ -426,7 +469,6 @@ struct task crp_task; crypto_session_t crp_session; /* Session */ - int crp_ilen; /* Input data total length */ int crp_olen; /* Result total length */ int crp_etype; /* @@ -457,12 +499,8 @@ int crp_op; - union { - caddr_t crp_buf; /* Data to be processed */ - struct mbuf *crp_mbuf; - struct uio *crp_uio; - }; - int crp_buf_type; /* Which union member describes data. 
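 * (Superseded by the cb_type field of the crypto_buffer structures
 * introduced below.)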
 */
+	struct crypto_buffer crp_buf;
+	struct crypto_buffer crp_obuf;
 	int crp_aad_start;	/* Location of AAD. */
 	int crp_aad_length;	/* 0 => no AAD. */
@@ -470,6 +508,7 @@
 				 * the session.
 				 */
 	int crp_payload_start;	/* Location of ciphertext. */
+	int crp_payload_output_start;
 	int crp_payload_length;
 	int crp_digest_start;	/* Location of MAC/tag.  Length is
 				 * from the session.
@@ -492,16 +531,72 @@
 	 */
 };
 
-#define CRYPTOP_ASYNC(crp) \
+static __inline void
+_crypto_use_buf(struct crypto_buffer *cb, void *buf, int len)
+{
+	cb->cb_buf = buf;
+	cb->cb_buf_len = len;
+	cb->cb_type = CRYPTO_BUF_CONTIG;
+}
+
+static __inline void
+_crypto_use_mbuf(struct crypto_buffer *cb, struct mbuf *m)
+{
+	cb->cb_mbuf = m;
+	cb->cb_type = CRYPTO_BUF_MBUF;
+}
+
+static __inline void
+_crypto_use_uio(struct crypto_buffer *cb, struct uio *uio)
+{
+	cb->cb_uio = uio;
+	cb->cb_type = CRYPTO_BUF_UIO;
+}
+
+static __inline void
+crypto_use_buf(struct cryptop *crp, void *buf, int len)
+{
+	_crypto_use_buf(&crp->crp_buf, buf, len);
+}
+
+static __inline void
+crypto_use_mbuf(struct cryptop *crp, struct mbuf *m)
+{
+	_crypto_use_mbuf(&crp->crp_buf, m);
+}
+
+static __inline void
+crypto_use_uio(struct cryptop *crp, struct uio *uio)
+{
+	_crypto_use_uio(&crp->crp_buf, uio);
+}
+
+static __inline void
+crypto_use_output_buf(struct cryptop *crp, void *buf, int len)
+{
+	_crypto_use_buf(&crp->crp_obuf, buf, len);
+}
+
+static __inline void
+crypto_use_output_mbuf(struct cryptop *crp, struct mbuf *m)
+{
+	_crypto_use_mbuf(&crp->crp_obuf, m);
+}
+
+static __inline void
+crypto_use_output_uio(struct cryptop *crp, struct uio *uio)
+{
+	_crypto_use_uio(&crp->crp_obuf, uio);
+}
+
+#define CRYPTOP_ASYNC(crp) \
 	(((crp)->crp_flags & CRYPTO_F_ASYNC) && \
 	crypto_ses2caps((crp)->crp_session) & CRYPTOCAP_F_SYNC)
 #define CRYPTOP_ASYNC_KEEPORDER(crp) \
 	(CRYPTOP_ASYNC(crp) && \
 	(crp)->crp_flags & CRYPTO_F_ASYNC_KEEPORDER)
-
-#define	CRYPTO_BUF_CONTIG	0x0
-#define	CRYPTO_BUF_UIO		0x1
-#define	CRYPTO_BUF_MBUF		0x2
+#define	CRYPTO_HAS_OUTPUT_BUFFER(crp) \
+	((crp)->crp_obuf.cb_type != CRYPTO_BUF_NONE)
 
 /* Flags in crp_op. */
 #define	CRYPTO_OP_DECRYPT	0x0
@@ -571,6 +666,10 @@
 extern	int crypto_userasymcrypto;	/* userland may do asym crypto reqs */
 extern	int crypto_devallowsoft;	/* only use hardware crypto */
 
+#ifdef SYSCTL_DECL
+SYSCTL_DECL(_kern_crypto);
+#endif
+
 /* Helper routines for drivers to initialize auth contexts for HMAC. */
 struct auth_hash;
 
@@ -582,26 +681,11 @@
 /*
  * Crypto-related utility routines used mainly by drivers.
  *
- * XXX these don't really belong here; but for now they're
- * kept apart from the rest of the system.
- *
  * Similar to m_copyback/data, *_copyback copy data from the 'src'
  * buffer into the crypto request's data buffer while *_copydata copy
  * data from the crypto request's data buffer into the 'dst'
  * buffer.
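  *
  * (Consumers attach request data through the crypto_use_*() helpers
  * above instead of writing the old union members directly; a sketch
  * mirroring the geli and IPsec conversions in this patch, with
  * generic buffer names:
  *
  *	crypto_use_mbuf(crp, m);
  *	crypto_use_buf(crp, data, datasize);
  *	crypto_use_output_buf(crp, obuf, obuflen);
  * )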
*/ -struct uio; -extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp); -extern void cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp); -extern int cuio_getptr(struct uio *uio, int loc, int *off); -extern int cuio_apply(struct uio *uio, int off, int len, - int (*f)(void *, void *, u_int), void *arg); - -struct mbuf; -struct iovec; -extern int crypto_mbuftoiov(struct mbuf *mbuf, struct iovec **iovptr, - int *cnt, int *allocated); - void crypto_copyback(struct cryptop *crp, int off, int size, const void *src); void crypto_copydata(struct cryptop *crp, int off, int size, void *dst); @@ -610,6 +694,23 @@ void *crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len); +int crypto_apply_buf(struct crypto_buffer *cb, int off, int len, + int (*f)(void *, void *, u_int), void *arg); +void *crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, + size_t skip, size_t len); +size_t crypto_buffer_len(struct crypto_buffer *cb); +void crypto_cursor_init(struct crypto_buffer_cursor *cc, + const struct crypto_buffer *cb); +void crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount); +void *crypto_cursor_segbase(struct crypto_buffer_cursor *cc); +size_t crypto_cursor_seglen(struct crypto_buffer_cursor *cc); +void crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size, + const void *vsrc); +void crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, + void *vdst); +void crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size, + void *vdst); + static __inline void crypto_read_iv(struct cryptop *crp, void *iv) { Index: sys/opencrypto/cryptodev.c =================================================================== --- sys/opencrypto/cryptodev.c +++ sys/opencrypto/cryptodev.c @@ -282,6 +282,7 @@ struct csession *cse; char *buf; + char *obuf; bool done; }; @@ -291,6 +292,11 @@ struct mtx lock; }; +static bool use_outputbuffers; +SYSCTL_BOOL(_kern_crypto, OID_AUTO, cryptodev_use_output, CTLFLAG_RW, + &use_outputbuffers, 0, + "Use separate output buffers for /dev/crypto requests."); + static struct timeval warninterval = { .tv_sec = 60, .tv_usec = 0 }; SYSCTL_TIMEVAL_SEC(_kern, OID_AUTO, cryptodev_warn_interval, CTLFLAG_RW, &warninterval, @@ -574,6 +580,8 @@ return (EINVAL); memset(&csp, 0, sizeof(csp)); + if (use_outputbuffers) + csp.csp_flags |= CSP_F_SEPARATE_OUTPUT; if (sop->cipher == CRYPTO_AES_NIST_GCM_16) { switch (sop->mac) { @@ -836,6 +844,8 @@ cod->cse = cse; cod->buf = malloc(len, M_XDATA, M_WAITOK); + if (crypto_get_params(cse->cses)->csp_flags & CSP_F_SEPARATE_OUTPUT) + cod->obuf = malloc(len, M_XDATA, M_WAITOK); return (cod); } @@ -843,6 +853,7 @@ cod_free(struct cryptop_data *cod) { + free(cod->obuf, M_XDATA); free(cod->buf, M_XDATA); free(cod, M_XDATA); } @@ -979,6 +990,8 @@ case COP_ENCRYPT: case COP_DECRYPT: crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST; + if (cod->obuf != NULL) + crp->crp_digest_start = 0; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); @@ -1008,10 +1021,10 @@ goto bail; } - crp->crp_ilen = cop->len + cse->hashsize; crp->crp_flags = CRYPTO_F_CBIMM | (cop->flags & COP_F_BATCH); - crp->crp_buf = cod->buf; - crp->crp_buf_type = CRYPTO_BUF_CONTIG; + crypto_use_buf(crp, cod->buf, cop->len + cse->hashsize); + if (cod->obuf) + crypto_use_output_buf(crp, cod->obuf, cop->len + cse->hashsize); crp->crp_callback = cryptodev_cb; crp->crp_opaque = cod; @@ -1033,8 +1046,9 @@ crp->crp_payload_length -= cse->ivsize; } - if (cop->mac != NULL) { - error = copyin(cop->mac, 
cod->buf + cop->len, cse->hashsize); + if (cop->mac != NULL && crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { + error = copyin(cop->mac, cod->buf + crp->crp_digest_start, + cse->hashsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; @@ -1074,15 +1088,19 @@ } if (cop->dst != NULL) { - error = copyout(cod->buf, cop->dst, cop->len); + error = copyout(cod->obuf != NULL ? cod->obuf : + cod->buf + crp->crp_payload_start, + cop->dst + crp->crp_payload_start, + crp->crp_payload_length); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } - if (cop->mac != NULL) { - error = copyout(cod->buf + cop->len, cop->mac, cse->hashsize); + if (cop->mac != NULL && (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) == 0) { + error = copyout((cod->obuf != NULL ? cod->obuf : cod->buf) + + crp->crp_digest_start, cop->mac, cse->hashsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; @@ -1149,25 +1167,13 @@ } crp->crp_payload_start = caead->aadlen; crp->crp_payload_length = caead->len; - crp->crp_digest_start = caead->aadlen + caead->len; + if (caead->op == COP_ENCRYPT && cod->obuf != NULL) + crp->crp_digest_start = caead->len; + else + crp->crp_digest_start = caead->aadlen + caead->len; switch (cse->mode) { case CSP_MODE_AEAD: - switch (caead->op) { - case COP_ENCRYPT: - crp->crp_op = CRYPTO_OP_ENCRYPT | - CRYPTO_OP_COMPUTE_DIGEST; - break; - case COP_DECRYPT: - crp->crp_op = CRYPTO_OP_DECRYPT | - CRYPTO_OP_VERIFY_DIGEST; - break; - default: - SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); - error = EINVAL; - goto bail; - } - break; case CSP_MODE_ETA: switch (caead->op) { case COP_ENCRYPT: @@ -1190,10 +1196,12 @@ goto bail; } - crp->crp_ilen = caead->aadlen + caead->len + cse->hashsize; crp->crp_flags = CRYPTO_F_CBIMM | (caead->flags & COP_F_BATCH); - crp->crp_buf = cod->buf; - crp->crp_buf_type = CRYPTO_BUF_CONTIG; + crypto_use_buf(crp, cod->buf, caead->aadlen + caead->len + + cse->hashsize); + if (cod->obuf != NULL) + crypto_use_output_buf(crp, cod->obuf, caead->len + + cse->hashsize); crp->crp_callback = cryptodev_cb; crp->crp_opaque = cod; @@ -1225,11 +1233,13 @@ crp->crp_payload_length -= cse->ivsize; } - error = copyin(caead->tag, cod->buf + caead->len + caead->aadlen, - cse->hashsize); - if (error) { - SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); - goto bail; + if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { + error = copyin(caead->tag, cod->buf + crp->crp_digest_start, + cse->hashsize); + if (error) { + SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); + goto bail; + } } cryptodev_warn(cse); again: @@ -1265,19 +1275,23 @@ } if (caead->dst != NULL) { - error = copyout(cod->buf + caead->aadlen, caead->dst, - caead->len); + error = copyout(cod->obuf != NULL ? cod->obuf : + cod->buf + crp->crp_payload_start, + caead->dst + crp->crp_payload_start, + crp->crp_payload_length); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } - error = copyout(cod->buf + caead->aadlen + caead->len, caead->tag, - cse->hashsize); - if (error) { - SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); - goto bail; + if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) == 0) { + error = copyout((cod->obuf != NULL ? 
Index: sys/opencrypto/cryptosoft.c
===================================================================
--- sys/opencrypto/cryptosoft.c
+++ sys/opencrypto/cryptosoft.c
@@ -110,11 +110,9 @@
 	const struct crypto_session_params *csp;
 	struct swcr_encdec *sw;
 	struct enc_xform *exf;
-	int i, j, k, blks, ind, count, ivlen;
-	struct uio *uio, uiolcl;
-	struct iovec iovlcl[4];
-	struct iovec *iov;
-	int iovcnt, iovalloc;
+	int i, j, blks, ivlen, resid, seglen, todo;
+	struct crypto_buffer_cursor cc, cc_out;
+	char *segbase;
 	int error;
 	bool encrypting;
 
@@ -146,32 +144,6 @@
 		return (error);
 	}
 
-	iov = iovlcl;
-	iovcnt = nitems(iovlcl);
-	iovalloc = 0;
-	uio = &uiolcl;
-	switch (crp->crp_buf_type) {
-	case CRYPTO_BUF_MBUF:
-		error = crypto_mbuftoiov(crp->crp_mbuf, &iov, &iovcnt,
-		    &iovalloc);
-		if (error)
-			return (error);
-		uio->uio_iov = iov;
-		uio->uio_iovcnt = iovcnt;
-		break;
-	case CRYPTO_BUF_UIO:
-		uio = crp->crp_uio;
-		break;
-	case CRYPTO_BUF_CONTIG:
-		iov[0].iov_base = crp->crp_buf;
-		iov[0].iov_len = crp->crp_ilen;
-		uio->uio_iov = iov;
-		uio->uio_iovcnt = 1;
-		break;
-	}
-
-	ivp = iv;
-
 	if (exf->reinit) {
 		/*
 		 * xforms that provide a reinit method perform all IV
 		 * handling themselves.
 		 */
 		exf->reinit(sw->sw_kschedule, iv);
 	}
 
-	count = crp->crp_payload_start;
-	ind = cuio_getptr(uio, count, &k);
-	if (ind == -1) {
-		error = EINVAL;
-		goto out;
-	}
+	ivp = iv;
 
-	i = crp->crp_payload_length;
+	crypto_cursor_init(&cc, &crp->crp_buf);
+	crypto_cursor_advance(&cc, crp->crp_payload_start);
+
+	resid = crp->crp_payload_length;
 	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
 
-	while (i > 0) {
+	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
 		/*
-		 * If there's insufficient data at the end of
-		 * an iovec, we have to do some copying.
+		 * The enc_xform interface always modifies its
+		 * buffer in place, so for requests with a
+		 * separate output buffer we must copy all of
+		 * the data block by block.
 		 */
-		if (uio->uio_iov[ind].iov_len < k + blks &&
-		    uio->uio_iov[ind].iov_len != k) {
-			cuio_copydata(uio, count, blks, blk);
+		crypto_cursor_init(&cc_out, &crp->crp_obuf);
+		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
 
-			/* Actual encryption/decryption */
-			if (exf->reinit) {
-				if (encrypting) {
-					exf->encrypt(sw->sw_kschedule,
-					    blk);
-				} else {
-					exf->decrypt(sw->sw_kschedule,
-					    blk);
-				}
-			} else if (encrypting) {
-				/* XOR with previous block */
-				for (j = 0; j < blks; j++)
-					blk[j] ^= ivp[j];
-
-				exf->encrypt(sw->sw_kschedule, blk);
-
-				/*
-				 * Keep encrypted block for XOR'ing
-				 * with next block
-				 */
-				bcopy(blk, iv, blks);
-				ivp = iv;
-			} else {	/* decrypt */
-				/*
-				 * Keep encrypted block for XOR'ing
-				 * with next block
-				 */
-				nivp = (ivp == iv) ? iv2 : iv;
-				bcopy(blk, nivp, blks);
-
-				exf->decrypt(sw->sw_kschedule, blk);
-
-				/* XOR with previous block */
-				for (j = 0; j < blks; j++)
-					blk[j] ^= ivp[j];
-
-				ivp = nivp;
-			}
-
-			/* Copy back decrypted block */
-			cuio_copyback(uio, count, blks, blk);
-
-			count += blks;
-
-			/* Advance pointer */
-			ind = cuio_getptr(uio, count, &k);
-			if (ind == -1) {
-				error = EINVAL;
-				goto out;
-			}
-
-			i -= blks;
-
-			/* Could be done... */
-			if (i == 0)
-				break;
-		}
-
-		while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
-			uint8_t *idat;
-			size_t nb, rem;
+		/*
+		 * If multiblock routines are available, batch up as
+		 * many blocks as will fit in 'blk' for each
+		 * iteration.
+		 */
+		if (exf->reinit &&
+		    ((encrypting && exf->encrypt_multi != NULL) ||
+		    (!encrypting && exf->decrypt_multi != NULL))) {
+			while (resid > 0) {
+				todo = min(resid, rounddown(sizeof(blk), blks));
 
-			nb = blks;
-			rem = MIN((size_t)i,
-			    uio->uio_iov[ind].iov_len - (size_t)k);
-			idat = (uint8_t *)uio->uio_iov[ind].iov_base + k;
+				crypto_cursor_copydata(&cc, todo, blk);
 
-			if (exf->reinit) {
-				if (encrypting && exf->encrypt_multi == NULL)
-					exf->encrypt(sw->sw_kschedule,
-					    idat);
-				else if (encrypting) {
-					nb = rounddown(rem, blks);
+				if (encrypting)
 					exf->encrypt_multi(sw->sw_kschedule,
-					    idat, nb);
-				} else if (exf->decrypt_multi == NULL)
-					exf->decrypt(sw->sw_kschedule,
-					    idat);
-				else {
-					nb = rounddown(rem, blks);
+					    blk, todo);
+				else
 					exf->decrypt_multi(sw->sw_kschedule,
-					    idat, nb);
+					    blk, todo);
+
+				crypto_cursor_copyback(&cc_out, todo, blk);
+
+				resid -= todo;
+			}
+			return (0);
+		}
+
+		while (resid > 0) {
+			crypto_cursor_copydata(&cc, blks, blk);
+
+			/* Actual encryption/decryption */
+			if (exf->reinit) {
+				if (encrypting) {
+					exf->encrypt(sw->sw_kschedule,
+					    blk);
+				} else {
+					exf->decrypt(sw->sw_kschedule,
+					    blk);
 				}
 			} else if (encrypting) {
-				/* XOR with previous block/IV */
+				/* XOR with previous block */
 				for (j = 0; j < blks; j++)
-					idat[j] ^= ivp[j];
+					blk[j] ^= ivp[j];
 
-				exf->encrypt(sw->sw_kschedule, idat);
-				ivp = idat;
+				exf->encrypt(sw->sw_kschedule, blk);
+
+				/*
+				 * Keep encrypted block for XOR'ing
+				 * with next block
+				 */
+				bcopy(blk, iv, blks);
+				ivp = iv;
 			} else {	/* decrypt */
 				/*
-				 * Keep encrypted block to be used
-				 * in next block's processing.
+				 * Keep encrypted block for XOR'ing
+				 * with next block
 				 */
 				nivp = (ivp == iv) ? iv2 : iv;
-				bcopy(idat, nivp, blks);
+				bcopy(blk, nivp, blks);
 
-				exf->decrypt(sw->sw_kschedule, idat);
+				exf->decrypt(sw->sw_kschedule, blk);
 
-				/* XOR with previous block/IV */
+				/* XOR with previous block */
 				for (j = 0; j < blks; j++)
-					idat[j] ^= ivp[j];
+					blk[j] ^= ivp[j];
 
 				ivp = nivp;
 			}
 
-			count += nb;
-			k += nb;
-			i -= nb;
+			crypto_cursor_copyback(&cc_out, blks, blk);
+
+			resid -= blks;
 		}
+		return (0);
+	}
 
+	while (resid > 0) {
 		/*
-		 * Advance to the next iov if the end of the current iov
-		 * is aligned with the end of a cipher block.
-		 * Note that the code is equivalent to calling:
-		 *	ind = cuio_getptr(uio, count, &k);
+		 * If the current segment doesn't hold an entire block, we
+		 * have to encrypt/decrypt in a local buffer.
 		 */
-		if (i > 0 && k == uio->uio_iov[ind].iov_len) {
-			k = 0;
-			ind++;
-			if (ind >= uio->uio_iovcnt) {
-				error = EINVAL;
-				goto out;
+		seglen = crypto_cursor_seglen(&cc);
+		if (seglen < blks) {
+			crypto_cursor_copydata_noadv(&cc, blks, blk);
+
+			/* Actual encryption/decryption */
+			if (exf->reinit) {
+				if (encrypting) {
+					exf->encrypt(sw->sw_kschedule,
+					    blk);
+				} else {
+					exf->decrypt(sw->sw_kschedule,
+					    blk);
+				}
+			} else if (encrypting) {
+				/* XOR with previous block */
+				for (j = 0; j < blks; j++)
+					blk[j] ^= ivp[j];
+
+				exf->encrypt(sw->sw_kschedule, blk);
+
+				/*
+				 * Keep encrypted block for XOR'ing
+				 * with next block
+				 */
+				bcopy(blk, iv, blks);
+				ivp = iv;
+			} else {	/* decrypt */
+				/*
+				 * Keep encrypted block for XOR'ing
+				 * with next block
+				 */
+				nivp = (ivp == iv) ? iv2 : iv;
+				bcopy(blk, nivp, blks);
+
+				exf->decrypt(sw->sw_kschedule, blk);
+
+				/* XOR with previous block */
+				for (j = 0; j < blks; j++)
+					blk[j] ^= ivp[j];
+
+				ivp = nivp;
 			}
+
+			crypto_cursor_copyback(&cc, blks, blk);
+
+			resid -= blks;
+			continue;
 		}
+
+		/* Encrypt/decrypt contiguous blocks in the current segment. */
+		segbase = crypto_cursor_segbase(&cc);
+		todo = min(resid, rounddown(seglen, blks));
+
+		/* Try to use multi-block methods. */
+		if (exf->reinit &&
+		    ((encrypting && exf->encrypt_multi != NULL) ||
+		    (!encrypting && exf->decrypt_multi != NULL))) {
+			if (encrypting)
+				exf->encrypt_multi(sw->sw_kschedule, segbase,
+				    todo);
+			else
+				exf->decrypt_multi(sw->sw_kschedule, segbase,
+				    todo);
+			crypto_cursor_advance(&cc, todo);
+			resid -= todo;
+			continue;
+		}
+
+		/* Encrypt/decrypt each block individually. */
+		for (i = 0; i < todo / blks; i++) {
+			/* Actual encryption/decryption */
+			if (exf->reinit) {
+				if (encrypting) {
+					exf->encrypt(sw->sw_kschedule,
+					    segbase);
+				} else {
+					exf->decrypt(sw->sw_kschedule,
+					    segbase);
+				}
+			} else if (encrypting) {
+				/* XOR with previous block */
+				for (j = 0; j < blks; j++)
+					segbase[j] ^= ivp[j];
+
+				exf->encrypt(sw->sw_kschedule, segbase);
+
+				/*
+				 * Keep encrypted block for XOR'ing
+				 * with next block
+				 */
+				bcopy(segbase, iv, blks);
+				ivp = iv;
+			} else {	/* decrypt */
+				/*
+				 * Keep encrypted block for XOR'ing
+				 * with next block
+				 */
+				nivp = (ivp == iv) ? iv2 : iv;
+				bcopy(segbase, nivp, blks);
+
+				exf->decrypt(sw->sw_kschedule, segbase);
+
+				/* XOR with previous block */
+				for (j = 0; j < blks; j++)
+					segbase[j] ^= ivp[j];
+
+				ivp = nivp;
+			}
+
+			segbase += blks;
+		}
+
+		crypto_cursor_advance(&cc, todo);
+		resid -= todo;
 	}
 
-out:
-	if (iovalloc)
-		free(iov, M_CRYPTO_DATA);
-
-	return (error);
+	return (0);
 }
 
 static void
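
Note (reviewer sketch, not part of the patch): the batching above relies on
the payload length being a multiple of the cipher block size, so that
min(resid, rounddown(sizeof(blk), blks)) always yields a whole number of
blocks. A standalone illustration of the arithmetic, with hypothetical names:

	#include <sys/param.h>	/* MIN(), rounddown() */

	/*
	 * With a 64-byte staging buffer and 16-byte blocks, a 160-byte
	 * payload is processed in passes of 64, 64, and 32 bytes; each
	 * pass is a whole number of blocks as long as the payload is.
	 */
	static int
	ex_batch_passes(int resid, int blks, int bufsz)
	{
		int todo, passes = 0;

		while (resid > 0) {
			todo = MIN(resid, rounddown(bufsz, blks));
			resid -= todo;
			passes++;
		}
		return (passes);
	}
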
@@ -414,8 +451,15 @@
 	if (err)
 		return err;
 
-	err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
-	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
+	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
+	    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
+		err = crypto_apply_buf(&crp->crp_obuf,
+		    crp->crp_payload_output_start, crp->crp_payload_length,
+		    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
+	else
+		err = crypto_apply(crp, crp->crp_payload_start,
+		    crp->crp_payload_length,
+		    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
 	if (err)
 		return err;
 
@@ -491,11 +535,12 @@
 	u_char aalg[AALG_MAX_RESULT_LEN];
 	u_char uaalg[AALG_MAX_RESULT_LEN];
 	u_char iv[EALG_MAX_BLOCK_LEN];
+	struct crypto_buffer_cursor cc;
 	union authctx ctx;
 	struct swcr_auth *swa;
 	struct auth_hash *axf;
 	uint32_t *blkp;
-	int blksz, i, ivlen, len;
+	int blksz, ivlen, len, resid;
 
 	swa = &ses->swcr_auth;
 	axf = swa->sw_axf;
@@ -508,9 +553,11 @@
 	crypto_read_iv(crp, iv);
 	axf->Reinit(&ctx, iv, ivlen);
-	for (i = 0; i < crp->crp_payload_length; i += blksz) {
-		len = MIN(crp->crp_payload_length - i, blksz);
-		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
+	crypto_cursor_init(&cc, &crp->crp_buf);
+	crypto_cursor_advance(&cc, crp->crp_payload_start);
+	for (resid = crp->crp_payload_length; resid > 0; resid -= len) {
+		len = MIN(resid, blksz);
+		crypto_cursor_copydata(&cc, len, blk);
 		bzero(blk + len, blksz - len);
 		axf->Update(&ctx, blk, blksz);
 	}
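
Note (reviewer sketch, not part of the patch): the crypto_apply_buf hunk above
encodes the rule that an encrypt-then-authenticate digest always covers the
ciphertext, which lands in the output buffer when encrypting with a separate
output and is the input buffer in every other case. A hypothetical helper
spelling out that selection:

	static void
	ex_hash_source(struct cryptop *crp, struct crypto_buffer **cbp,
	    int *offp)
	{
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
		    CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			*cbp = &crp->crp_obuf;
			*offp = crp->crp_payload_output_start;
		} else {
			*cbp = &crp->crp_buf;
			*offp = crp->crp_payload_start;
		}
	}
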
@@ -544,13 +591,14 @@
 	u_char aalg[AALG_MAX_RESULT_LEN];
 	u_char uaalg[AALG_MAX_RESULT_LEN];
 	u_char iv[EALG_MAX_BLOCK_LEN];
+	struct crypto_buffer_cursor cc, cc_out;
 	union authctx ctx;
 	struct swcr_auth *swa;
 	struct swcr_encdec *swe;
 	struct auth_hash *axf;
 	struct enc_xform *exf;
 	uint32_t *blkp;
-	int blksz, i, ivlen, len, r;
+	int blksz, ivlen, len, r, resid;
 
 	swa = &ses->swcr_auth;
 	axf = swa->sw_axf;
@@ -572,9 +620,11 @@
 	axf->Reinit(&ctx, iv, ivlen);
 
 	/* Supply MAC with AAD */
-	for (i = 0; i < crp->crp_aad_length; i += blksz) {
-		len = MIN(crp->crp_aad_length - i, blksz);
-		crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
+	crypto_cursor_init(&cc, &crp->crp_buf);
+	crypto_cursor_advance(&cc, crp->crp_aad_start);
+	for (resid = crp->crp_aad_length; resid > 0; resid -= len) {
+		len = MIN(resid, blksz);
+		crypto_cursor_copydata(&cc, len, blk);
 		bzero(blk + len, blksz - len);
 		axf->Update(&ctx, blk, blksz);
 	}
@@ -582,16 +632,22 @@
 	exf->reinit(swe->sw_kschedule, iv);
 
 	/* Do encryption with MAC */
-	for (i = 0; i < crp->crp_payload_length; i += len) {
-		len = MIN(crp->crp_payload_length - i, blksz);
+	crypto_cursor_init(&cc, &crp->crp_buf);
+	crypto_cursor_advance(&cc, crp->crp_payload_start);
+	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
+		crypto_cursor_init(&cc_out, &crp->crp_obuf);
+		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
+	} else
+		cc_out = cc;
+	for (resid = crp->crp_payload_length; resid > 0; resid -= len) {
+		len = MIN(resid, blksz);
 		if (len < blksz)
 			bzero(blk, blksz);
-		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
+		crypto_cursor_copydata(&cc, len, blk);
 		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
 			exf->encrypt(swe->sw_kschedule, blk);
 			axf->Update(&ctx, blk, len);
-			crypto_copyback(crp, crp->crp_payload_start + i, len,
-			    blk);
+			crypto_cursor_copyback(&cc_out, len, blk);
 		} else {
 			axf->Update(&ctx, blk, len);
 		}
@@ -618,15 +674,16 @@
 			return (EBADMSG);
 
 		/* tag matches, decrypt data */
-		for (i = 0; i < crp->crp_payload_length; i += blksz) {
-			len = MIN(crp->crp_payload_length - i, blksz);
+		crypto_cursor_init(&cc, &crp->crp_buf);
+		crypto_cursor_advance(&cc, crp->crp_payload_start);
+		for (resid = crp->crp_payload_length; resid > 0;
+		    resid -= len) {
+			len = MIN(resid, blksz);
 			if (len < blksz)
 				bzero(blk, blksz);
-			crypto_copydata(crp, crp->crp_payload_start + i, len,
-			    blk);
+			crypto_cursor_copydata(&cc, len, blk);
 			exf->decrypt(swe->sw_kschedule, blk);
-			crypto_copyback(crp, crp->crp_payload_start + i, len,
-			    blk);
+			crypto_cursor_copyback(&cc_out, len, blk);
 		}
 	} else {
 		/* Inject the authentication data */
@@ -645,10 +702,11 @@
 	u_char aalg[AALG_MAX_RESULT_LEN];
 	u_char uaalg[AALG_MAX_RESULT_LEN];
 	u_char iv[EALG_MAX_BLOCK_LEN];
+	struct crypto_buffer_cursor cc;
 	union authctx ctx;
 	struct swcr_auth *swa;
 	struct auth_hash *axf;
-	int blksz, i, ivlen, len;
+	int blksz, ivlen, len, resid;
 
 	swa = &ses->swcr_auth;
 	axf = swa->sw_axf;
@@ -668,9 +726,11 @@
 	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;
 
 	axf->Reinit(&ctx, iv, ivlen);
-	for (i = 0; i < crp->crp_payload_length; i += blksz) {
-		len = MIN(crp->crp_payload_length - i, blksz);
-		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
+	crypto_cursor_init(&cc, &crp->crp_buf);
+	crypto_cursor_advance(&cc, crp->crp_payload_start);
+	for (resid = crp->crp_payload_length; resid > 0; resid -= len) {
+		len = MIN(resid, blksz);
+		crypto_cursor_copydata(&cc, len, blk);
 		bzero(blk + len, blksz - len);
 		axf->Update(&ctx, blk, blksz);
 	}
@@ -698,12 +758,13 @@
 	u_char aalg[AALG_MAX_RESULT_LEN];
 	u_char uaalg[AALG_MAX_RESULT_LEN];
 	u_char iv[EALG_MAX_BLOCK_LEN];
+	struct crypto_buffer_cursor cc, cc_out;
 	union authctx ctx;
 	struct swcr_auth *swa;
 	struct swcr_encdec *swe;
 	struct auth_hash *axf;
 	struct enc_xform *exf;
-	int blksz, i, ivlen, len, r;
+	int blksz, ivlen, len, r, resid;
 
 	swa = &ses->swcr_auth;
 	axf = swa->sw_axf;
@@ -732,9 +793,11 @@
 	axf->Reinit(&ctx, iv, ivlen);
 
 	/* Supply MAC with AAD */
-	for (i = 0; i < crp->crp_aad_length; i += blksz) {
-		len = MIN(crp->crp_aad_length - i, blksz);
-		crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
+	crypto_cursor_init(&cc, &crp->crp_buf);
+	crypto_cursor_advance(&cc, crp->crp_aad_start);
+	for (resid = crp->crp_aad_length; resid > 0; resid -= len) {
+		len = MIN(resid, blksz);
+		crypto_cursor_copydata(&cc, len, blk);
 		bzero(blk + len, blksz - len);
 		axf->Update(&ctx, blk, blksz);
 	}
@@ -742,16 +805,22 @@
 	exf->reinit(swe->sw_kschedule, iv);
 
 	/* Do encryption/decryption with MAC */
-	for (i = 0; i < crp->crp_payload_length; i += len) {
-		len = MIN(crp->crp_payload_length - i, blksz);
+	crypto_cursor_init(&cc, &crp->crp_buf);
+	crypto_cursor_advance(&cc, crp->crp_payload_start);
+	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
+		crypto_cursor_init(&cc_out, &crp->crp_obuf);
+		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
+	} else
+		cc_out = cc;
+	for (resid = crp->crp_payload_length; resid > 0; resid -= len) {
+		len = MIN(resid, blksz);
 		if (len < blksz)
 			bzero(blk, blksz);
-		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
+		crypto_cursor_copydata(&cc, len, blk);
 		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
 			axf->Update(&ctx, blk, len);
 			exf->encrypt(swe->sw_kschedule, blk);
-			crypto_copyback(crp, crp->crp_payload_start + i, len,
-			    blk);
+			crypto_cursor_copyback(&cc_out, len, blk);
 		} else {
 			/*
 			 * One of the problems with CCM+CBC is that
@@ -780,15 +849,16 @@
 
 		/* tag matches, decrypt data */
 		exf->reinit(swe->sw_kschedule, iv);
-		for (i = 0; i < crp->crp_payload_length; i += blksz) {
-			len = MIN(crp->crp_payload_length - i, blksz);
+		crypto_cursor_init(&cc, &crp->crp_buf);
+		crypto_cursor_advance(&cc, crp->crp_payload_start);
+		for (resid = crp->crp_payload_length; resid > 0;
+		    resid -= len) {
+			len = MIN(resid, blksz);
 			if (len < blksz)
 				bzero(blk, blksz);
-			crypto_copydata(crp, crp->crp_payload_start + i, len,
-			    blk);
+			crypto_cursor_copydata(&cc, len, blk);
 			exf->decrypt(swe->sw_kschedule, blk);
-			crypto_copyback(crp, crp->crp_payload_start + i, len,
-			    blk);
+			crypto_cursor_copyback(&cc_out, len, blk);
 		}
 	} else {
 		/* Inject the authentication data */
@@ -867,13 +937,13 @@
 	 */
 	crypto_copyback(crp, crp->crp_payload_start, result, out);
 	if (result < crp->crp_payload_length) {
-		switch (crp->crp_buf_type) {
+		switch (crp->crp_buf.cb_type) {
 		case CRYPTO_BUF_MBUF:
 			adj = result - crp->crp_payload_length;
-			m_adj(crp->crp_mbuf, adj);
+			m_adj(crp->crp_buf.cb_mbuf, adj);
 			break;
 		case CRYPTO_BUF_UIO: {
-			struct uio *uio = crp->crp_uio;
+			struct uio *uio = crp->crp_buf.cb_uio;
 			int ind;
 
 			adj = crp->crp_payload_length - result;
@@ -892,6 +962,8 @@
 			}
 		}
 			break;
+		default:
+			break;
 		}
 	}
 	free(out, M_CRYPTO_DATA);
@@ -1213,7 +1285,7 @@
 swcr_probesession(device_t dev, const struct crypto_session_params *csp)
 {
 
-	if (csp->csp_flags != 0)
+	if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT)) != 0)
 		return (EINVAL);
 	switch (csp->csp_mode) {
 	case CSP_MODE_COMPRESS:
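
Note (reviewer sketch, not part of the patch): drivers opt in per session by
masking CSP_F_SEPARATE_OUTPUT out of the csp_flags check in their probesession
method, as swcr_probesession does above. A hypothetical, unconverted driver
keeps the strict check and is simply never bound to such sessions:

	static int
	ex_probesession(device_t dev, const struct crypto_session_params *csp)
	{
		/* No CSP_F_* flags supported by this driver. */
		if (csp->csp_flags != 0)
			return (EINVAL);
		/* ... algorithm and key-size checks elided ... */
		return (CRYPTODEV_PROBE_SOFTWARE);
	}
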
Index: sys/opencrypto/ktls_ocf.c
===================================================================
--- sys/opencrypto/ktls_ocf.c
+++ sys/opencrypto/ktls_ocf.c
@@ -73,11 +73,26 @@
     CTLFLAG_RD, &ocf_tls13_gcm_crypts,
     "Total number of OCF TLS 1.3 GCM encryption operations");
 
+static counter_u64_t ocf_inplace;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
+    CTLFLAG_RD, &ocf_inplace,
+    "Total number of OCF in-place operations");
+
+static counter_u64_t ocf_separate_output;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
+    CTLFLAG_RD, &ocf_separate_output,
+    "Total number of OCF operations with a separate output buffer");
+
 static counter_u64_t ocf_retries;
 SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
     &ocf_retries,
     "Number of OCF encryption operation retries");
 
+static bool force_inplace = false;
+SYSCTL_BOOL(_kern_ipc_tls, OID_AUTO, ocf_force_inplace, CTLFLAG_RW,
+    &force_inplace, 0,
+    "Force in-place crypto");
+
 static int
 ktls_ocf_callback(struct cryptop *crp)
 {
@@ -97,21 +112,33 @@
     struct iovec *outiov, int iovcnt, uint64_t seqno,
     uint8_t record_type __unused)
 {
-	struct uio uio;
+	struct uio uio, out_uio, *tag_uio;
 	struct tls_aead_data ad;
 	struct cryptop *crp;
 	struct ocf_session *os;
 	struct ocf_operation *oo;
-	struct iovec *iov;
+	struct iovec *iov, *out_iov;
 	int i, error;
 	uint16_t tls_comp_len;
+	bool inplace;
 
 	os = tls->cipher;
 
-	oo = malloc(sizeof(*oo) + (iovcnt + 2) * sizeof(*iov), M_KTLS_OCF,
+	oo = malloc(sizeof(*oo) + (iovcnt + 2) * sizeof(*iov) * 2, M_KTLS_OCF,
 	    M_WAITOK | M_ZERO);
 	oo->os = os;
 	iov = oo->iov;
+	out_iov = iov + iovcnt + 2;
+
+	uio.uio_iov = iov;
+	uio.uio_offset = 0;
+	uio.uio_segflg = UIO_SYSSPACE;
+	uio.uio_td = curthread;
+
+	out_uio.uio_iov = out_iov;
+	out_uio.uio_offset = 0;
+	out_uio.uio_segflg = UIO_SYSSPACE;
+	out_uio.uio_td = curthread;
 
 	crp = crypto_getreq(os->sid, M_WAITOK);
 
@@ -129,46 +156,58 @@
 	ad.tls_length = htons(tls_comp_len);
 	iov[0].iov_base = &ad;
 	iov[0].iov_len = sizeof(ad);
-	uio.uio_resid = sizeof(ad);
+	crp->crp_aad_start = 0;
+	crp->crp_aad_length = sizeof(ad);
 
-	/*
-	 * OCF always does encryption in place, so copy the data if
-	 * needed.  Ugh.
-	 */
+	/* Copy iov's. */
+	if (force_inplace)
+		memcpy(iov + 1, outiov, iovcnt * sizeof(*iov));
+	else
+		memcpy(iov + 1, iniov, iovcnt * sizeof(*iov));
+	uio.uio_iovcnt = iovcnt + 1;
+	memcpy(out_iov, outiov, iovcnt * sizeof(*out_iov));
+	out_uio.uio_iovcnt = iovcnt;
+
+	/* Compute payload length and determine if encryption is in place. */
+	inplace = true;
+	crp->crp_payload_start = sizeof(ad);
 	for (i = 0; i < iovcnt; i++) {
-		iov[i + 1] = outiov[i];
-		if (iniov[i].iov_base != outiov[i].iov_base)
-			memcpy(outiov[i].iov_base, iniov[i].iov_base,
-			    outiov[i].iov_len);
-		uio.uio_resid += outiov[i].iov_len;
+		if (iniov[i].iov_base != outiov[i].iov_base) {
+			if (force_inplace)
+				memcpy(outiov[i].iov_base, iniov[i].iov_base,
+				    outiov[i].iov_len);
+			else
+				inplace = false;
+		}
+		crp->crp_payload_length += iniov[i].iov_len;
 	}
+	uio.uio_resid = sizeof(ad) + crp->crp_payload_length;
+	out_uio.uio_resid = crp->crp_payload_length;
 
-	iov[iovcnt + 1].iov_base = trailer;
-	iov[iovcnt + 1].iov_len = AES_GMAC_HASH_LEN;
-	uio.uio_resid += AES_GMAC_HASH_LEN;
+	if (inplace)
+		tag_uio = &uio;
+	else
+		tag_uio = &out_uio;
 
-	uio.uio_iov = iov;
-	uio.uio_iovcnt = iovcnt + 2;
-	uio.uio_offset = 0;
-	uio.uio_segflg = UIO_SYSSPACE;
-	uio.uio_td = curthread;
+	tag_uio->uio_iov[tag_uio->uio_iovcnt].iov_base = trailer;
+	tag_uio->uio_iov[tag_uio->uio_iovcnt].iov_len = AES_GMAC_HASH_LEN;
+	tag_uio->uio_iovcnt++;
+	crp->crp_digest_start = tag_uio->uio_resid;
+	tag_uio->uio_resid += AES_GMAC_HASH_LEN;
 
 	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
 	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
-	crp->crp_buf_type = CRYPTO_BUF_UIO;
-	crp->crp_uio = &uio;
-	crp->crp_ilen = uio.uio_resid;
+	crypto_use_uio(crp, &uio);
+	if (!inplace)
+		crypto_use_output_uio(crp, &out_uio);
 	crp->crp_opaque = oo;
 	crp->crp_callback = ktls_ocf_callback;
-	crp->crp_aad_start = 0;
-	crp->crp_aad_length = sizeof(ad);
-	crp->crp_payload_start = sizeof(ad);
-	crp->crp_payload_length = crp->crp_ilen -
-	    (sizeof(ad) + AES_GMAC_HASH_LEN);
-	crp->crp_digest_start = crp->crp_ilen - AES_GMAC_HASH_LEN;
-
 	counter_u64_add(ocf_tls12_gcm_crypts, 1);
+	if (inplace)
+		counter_u64_add(ocf_inplace, 1);
+	else
+		counter_u64_add(ocf_separate_output, 1);
 	for (;;) {
 		error = crypto_dispatch(crp);
 		if (error)
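
Note (reviewer sketch, not part of the patch): the in-place test above is pure
pointer comparison per iovec, and kern.ipc.tls.ocf_force_inplace restores the
old copy-then-encrypt-in-place behaviour for comparison. The decision
condensed into a hypothetical predicate:

	static bool
	ex_is_inplace(const struct iovec *iniov, const struct iovec *outiov,
	    int iovcnt)
	{
		int i;

		for (i = 0; i < iovcnt; i++)
			if (iniov[i].iov_base != outiov[i].iov_base)
				return (false);
		return (true);
	}
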
@@ -200,22 +239,35 @@
     const struct tls_record_layer *hdr, uint8_t *trailer,
     struct iovec *iniov, struct iovec *outiov, int iovcnt,
     uint64_t seqno, uint8_t record_type)
 {
-	struct uio uio;
+	struct uio uio, out_uio;
 	struct tls_aead_data_13 ad;
 	char nonce[12];
 	struct cryptop *crp;
 	struct ocf_session *os;
 	struct ocf_operation *oo;
-	struct iovec *iov;
+	struct iovec *iov, *out_iov;
 	int i, error;
+	bool inplace;
 
 	os = tls->cipher;
 
-	oo = malloc(sizeof(*oo) + (iovcnt + 2) * sizeof(*iov), M_KTLS_OCF,
+	oo = malloc(sizeof(*oo) + (iovcnt + 2) * sizeof(*iov) * 2, M_KTLS_OCF,
 	    M_WAITOK | M_ZERO);
 	oo->os = os;
 	iov = oo->iov;
+	out_iov = iov + iovcnt + 2;
+
+	uio.uio_iov = iov;
+	uio.uio_offset = 0;
+	uio.uio_segflg = UIO_SYSSPACE;
+	uio.uio_td = curthread;
+
+	out_uio.uio_iov = out_iov;
+	out_uio.uio_offset = 0;
+	out_uio.uio_segflg = UIO_SYSSPACE;
+	out_uio.uio_td = curthread;
+
 	crp = crypto_getreq(os->sid, M_WAITOK);
 
 	/* Setup the nonce. */
@@ -229,48 +281,67 @@
 	ad.tls_length = hdr->tls_length;
 	iov[0].iov_base = &ad;
 	iov[0].iov_len = sizeof(ad);
-	uio.uio_resid = sizeof(ad);
+	crp->crp_aad_start = 0;
+	crp->crp_aad_length = sizeof(ad);
 
-	/*
-	 * OCF always does encryption in place, so copy the data if
-	 * needed.  Ugh.
-	 */
+	/* Copy iov's. */
+	if (force_inplace)
+		memcpy(iov + 1, outiov, iovcnt * sizeof(*iov));
+	else
+		memcpy(iov + 1, iniov, iovcnt * sizeof(*iov));
+	uio.uio_iovcnt = iovcnt + 1;
+	memcpy(out_iov, outiov, iovcnt * sizeof(*out_iov));
+	out_uio.uio_iovcnt = iovcnt;
+
+	/* Compute payload length and determine if encryption is in place. */
+	inplace = true;
+	crp->crp_payload_start = sizeof(ad);
 	for (i = 0; i < iovcnt; i++) {
-		iov[i + 1] = outiov[i];
-		if (iniov[i].iov_base != outiov[i].iov_base)
-			memcpy(outiov[i].iov_base, iniov[i].iov_base,
-			    outiov[i].iov_len);
-		uio.uio_resid += outiov[i].iov_len;
+		if (iniov[i].iov_base != outiov[i].iov_base) {
+			if (force_inplace)
+				memcpy(outiov[i].iov_base, iniov[i].iov_base,
+				    outiov[i].iov_len);
+			else
+				inplace = false;
+		}
+		crp->crp_payload_length += iniov[i].iov_len;
 	}
+	uio.uio_resid = sizeof(ad) + crp->crp_payload_length;
+	out_uio.uio_resid = crp->crp_payload_length;
 
+	/*
+	 * Always include the full trailer as input to get the
+	 * record_type even if only the first byte is used.
+	 */
 	trailer[0] = record_type;
 	iov[iovcnt + 1].iov_base = trailer;
 	iov[iovcnt + 1].iov_len = AES_GMAC_HASH_LEN + 1;
+	uio.uio_iovcnt++;
 	uio.uio_resid += AES_GMAC_HASH_LEN + 1;
-
-	uio.uio_iov = iov;
-	uio.uio_iovcnt = iovcnt + 2;
-	uio.uio_offset = 0;
-	uio.uio_segflg = UIO_SYSSPACE;
-	uio.uio_td = curthread;
+	if (inplace) {
+		crp->crp_digest_start = uio.uio_resid - AES_GMAC_HASH_LEN;
+	} else {
+		out_iov[iovcnt] = iov[iovcnt + 1];
+		out_uio.uio_iovcnt++;
+		out_uio.uio_resid += AES_GMAC_HASH_LEN + 1;
+		crp->crp_digest_start = out_uio.uio_resid - AES_GMAC_HASH_LEN;
+	}
 
 	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
 	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
-	crp->crp_buf_type = CRYPTO_BUF_UIO;
-	crp->crp_uio = &uio;
-	crp->crp_ilen = uio.uio_resid;
+	crypto_use_uio(crp, &uio);
+	if (!inplace)
+		crypto_use_output_uio(crp, &out_uio);
 	crp->crp_opaque = oo;
 	crp->crp_callback = ktls_ocf_callback;
-	crp->crp_aad_start = 0;
-	crp->crp_aad_length = sizeof(ad);
-	crp->crp_payload_start = sizeof(ad);
-	crp->crp_payload_length = crp->crp_ilen -
-	    (sizeof(ad) + AES_GMAC_HASH_LEN);
-	crp->crp_digest_start = crp->crp_ilen - AES_GMAC_HASH_LEN;
 	memcpy(crp->crp_iv, nonce, sizeof(nonce));
 
 	counter_u64_add(ocf_tls13_gcm_crypts, 1);
+	if (inplace)
+		counter_u64_add(ocf_inplace, 1);
+	else
+		counter_u64_add(ocf_separate_output, 1);
 	for (;;) {
 		error = crypto_dispatch(crp);
 		if (error)
@@ -317,6 +388,7 @@
 	int error;
 
 	memset(&csp, 0, sizeof(csp));
+	csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
 
 	switch (tls->params.cipher_algorithm) {
 	case CRYPTO_AES_NIST_GCM_16:
@@ -380,6 +452,8 @@
 	case MOD_LOAD:
 		ocf_tls12_gcm_crypts = counter_u64_alloc(M_WAITOK);
 		ocf_tls13_gcm_crypts = counter_u64_alloc(M_WAITOK);
+		ocf_inplace = counter_u64_alloc(M_WAITOK);
+		ocf_separate_output = counter_u64_alloc(M_WAITOK);
 		ocf_retries = counter_u64_alloc(M_WAITOK);
 		return (ktls_crypto_backend_register(&ocf_backend));
 	case MOD_UNLOAD:
@@ -388,6 +462,8 @@
 			return (error);
 		counter_u64_free(ocf_tls12_gcm_crypts);
 		counter_u64_free(ocf_tls13_gcm_crypts);
+		counter_u64_free(ocf_inplace);
+		counter_u64_free(ocf_separate_output);
 		counter_u64_free(ocf_retries);
 		return (0);
 	default:
Index: sys/sys/bus_dma.h
===================================================================
--- sys/sys/bus_dma.h
+++ sys/sys/bus_dma.h
@@ -111,6 +111,7 @@
 /* Forwards needed by prototypes below. */
 union ccb;
 struct bio;
+struct crypto_buffer;
 struct cryptop;
 struct mbuf;
 struct memdesc;
@@ -270,6 +271,10 @@
 int bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct cryptop *crp, bus_dmamap_callback_t *callback,
			void *callback_arg, int flags);
+int bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map,
+			struct crypto_buffer *cb,
+			bus_dmamap_callback_t *callback,
+			void *callback_arg, int flags);
 
 /*
  * Loads any memory descriptor.
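
Note (reviewer sketch, not part of the patch): a hypothetical driver fragment
showing how the loader declared above might map the input buffer and, when
present, the separate output buffer of a request for DMA. 'struct
mydrv_softc' and 'mydrv_dma_cb' are assumed driver-local names.

	static int
	mydrv_map_request(struct mydrv_softc *sc, struct cryptop *crp)
	{
		int error;

		error = bus_dmamap_load_crp_buffer(sc->dmat, sc->in_map,
		    &crp->crp_buf, mydrv_dma_cb, sc, BUS_DMA_NOWAIT);
		if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp))
			error = bus_dmamap_load_crp_buffer(sc->dmat,
			    sc->out_map, &crp->crp_obuf, mydrv_dma_cb, sc,
			    BUS_DMA_NOWAIT);
		return (error);
	}
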