Index: sys/crypto/ccp/ccp.c
===================================================================
--- sys/crypto/ccp/ccp.c
+++ sys/crypto/ccp/ccp.c
@@ -107,6 +107,10 @@
 	case CRYPTO_BUF_CONTIG:
 		error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len);
 		break;
+	case CRYPTO_BUF_UNMAPPED:
+		error = sglist_append_vmpages(sg, cb->cb_unmapped,
+		    cb->cb_unmapped_offset, cb->cb_unmapped_len);
+		break;
 	default:
 		error = EINVAL;
 	}
Index: sys/dev/cxgbe/crypto/t4_crypto.c
===================================================================
--- sys/dev/cxgbe/crypto/t4_crypto.c
+++ sys/dev/cxgbe/crypto/t4_crypto.c
@@ -272,6 +272,10 @@
 	case CRYPTO_BUF_CONTIG:
 		error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len);
 		break;
+	case CRYPTO_BUF_UNMAPPED:
+		error = sglist_append_vmpages(sg, cb->cb_unmapped,
+		    cb->cb_unmapped_offset, cb->cb_unmapped_len);
+		break;
 	default:
 		error = EINVAL;
 	}
Index: sys/dev/sec/sec.c
===================================================================
--- sys/dev/sec/sec.c
+++ sys/dev/sec/sec.c
@@ -851,6 +851,9 @@
 	case CRYPTO_BUF_MBUF:
 		size = m_length(crp->crp_buf.cb_mbuf, NULL);
 		break;
+	case CRYPTO_BUF_UNMAPPED:
+		size = PAGE_SIZE - crp->crp_buf.cb_unmapped_offset;
+		break;
 	default:
 		return (EINVAL);
 	}
Index: sys/geom/eli/g_eli.c
===================================================================
--- sys/geom/eli/g_eli.c
+++ sys/geom/eli/g_eli.c
@@ -49,6 +49,8 @@
 #include
 #include
+#include
+
 #include
 #include
@@ -972,6 +974,16 @@
 	 */
 	pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
 	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
+	if (CRYPTO_HAS_UNMAPPED) {
+		/*
+		 * On DMAP architectures we can use unmapped I/O.  But don't
+		 * use it with data integrity verification.  That module needs
+		 * much more intimate access to the bio's data in order to
+		 * add/remove HMAC fields, and to split bios that exceed MAXPHYS.
+		 */
+		if ((sc->sc_flags & G_ELI_FLAG_AUTH) == 0)
+			pp->flags |= G_PF_ACCEPT_UNMAPPED;
+	}
 	pp->mediasize = sc->sc_mediasize;
 	pp->sectorsize = sc->sc_sectorsize;
 	LIST_FOREACH(gap, &bpp->aliases, ga_next)
Index: sys/geom/eli/g_eli_privacy.c
===================================================================
--- sys/geom/eli/g_eli_privacy.c
+++ sys/geom/eli/g_eli_privacy.c
@@ -63,6 +63,28 @@
 MALLOC_DECLARE(M_ELI);
 
 /*
+ * Copy data from a (potentially unmapped) bio to a kernelspace buffer.
+ *
+ * The buffer must have at least as much room as bp->bio_length.
+ */
+static void
+g_eli_bio_copyin(struct bio *bp, void *kaddr)
+{
+	struct uio uio;
+	struct iovec iov[1];
+
+	iov[0].iov_base = kaddr;
+	iov[0].iov_len = bp->bio_length;
+	uio.uio_iov = iov;
+	uio.uio_iovcnt = 1;
+	uio.uio_offset = 0;
+	uio.uio_resid = bp->bio_length;
+	uio.uio_segflg = UIO_SYSSPACE;
+	uio.uio_rw = UIO_READ;
+	uiomove_fromphys(bp->bio_ma, bp->bio_ma_offset, bp->bio_length, &uio);
+}
+
+/*
  * The function is called after we read and decrypt data.
  *
  * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> G_ELI_CRYPTO_READ_DONE -> g_io_deliver
@@ -98,8 +120,7 @@
 	 */
 	if (bp->bio_inbed < bp->bio_children)
 		return (0);
-	free(bp->bio_driver2, M_ELI);
-	bp->bio_driver2 = NULL;
+
 	if (bp->bio_error != 0) {
 		G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).",
 		    bp->bio_error);
@@ -167,6 +188,11 @@
 		return (0);
 	}
 	cbp->bio_data = bp->bio_driver2;
+	/*
+	 * Clear BIO_UNMAPPED, which was inherited from where we cloned the
+	 * bio in g_eli_start, because we manually set bio_data.
+	 */
+	cbp->bio_flags &= ~BIO_UNMAPPED;
 	cbp->bio_done = g_eli_write_done;
 	cp = LIST_FIRST(&gp->consumer);
 	cbp->bio_to = cp->provider;
@@ -236,10 +262,12 @@
 {
 	struct g_eli_softc *sc;
 	struct cryptop *crp;
+	vm_page_t *pages;
 	u_int i, nsec, secsize;
 	off_t dstoff;
-	u_char *data;
+	u_char *data = NULL;
 	int error;
+	int pages_offset;
 
 	G_ELI_LOGREQ(3, bp, "%s", __func__);
@@ -258,16 +286,37 @@
 	if (bp->bio_cmd == BIO_WRITE) {
 		data = malloc(bp->bio_length, M_ELI, M_WAITOK);
 		bp->bio_driver2 = data;
-		bcopy(bp->bio_data, data, bp->bio_length);
-	} else
-		data = bp->bio_data;
+		/*
+		 * This copy could be eliminated by using crypto's output
+		 * buffer instead of a single overwriting buffer.
+		 */
+		if ((bp->bio_flags & BIO_UNMAPPED) != 0)
+			g_eli_bio_copyin(bp, data);
+		else
+			bcopy(bp->bio_data, data, bp->bio_length);
+	} else {
+		if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
+			pages = bp->bio_ma;
+			pages_offset = bp->bio_ma_offset;
+		} else {
+			data = bp->bio_data;
+		}
+	}
 
 	for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) {
 		crp = crypto_getreq(wr->w_sid, M_WAITOK);
 
-		crypto_use_buf(crp, data, secsize);
+		if (data != NULL) {
+			crypto_use_buf(crp, data, secsize);
+			data += secsize;
+		} else {
+			MPASS(pages != NULL);
+			crypto_use_unmapped(crp, pages, secsize, pages_offset);
+			pages_offset += secsize;
+			pages += pages_offset >> PAGE_SHIFT;
+			pages_offset &= PAGE_MASK;
+		}
 		crp->crp_opaque = (void *)bp;
-		data += secsize;
 		if (bp->bio_cmd == BIO_WRITE) {
 			crp->crp_op = CRYPTO_OP_ENCRYPT;
 			crp->crp_callback = g_eli_crypto_write_done;
Index: sys/kern/subr_bus_dma.c
===================================================================
--- sys/kern/subr_bus_dma.c
+++ sys/kern/subr_bus_dma.c
@@ -661,6 +661,11 @@
 		error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs,
 		    flags);
 		break;
+	case CRYPTO_BUF_UNMAPPED:
+		error = _bus_dmamap_load_ma(dmat, map, cb->cb_unmapped,
+		    cb->cb_unmapped_len, cb->cb_unmapped_offset, flags, NULL,
+		    &nsegs);
+		break;
 	default:
 		error = EINVAL;
 	}
Index: sys/opencrypto/criov.c
===================================================================
--- sys/opencrypto/criov.c
+++ sys/opencrypto/criov.c
@@ -41,11 +41,17 @@
 #include <sys/lock.h>
 #include <sys/sdt.h>
+#include <machine/vmparam.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/pmap.h>
+
 #include <opencrypto/cryptodev.h>
 
 /*
- * This macro is only for avoiding code duplication, as we need to skip
- * given number of bytes in the same way in three functions below.
+ * These macros are only for avoiding code duplication, as we need to skip
+ * given number of bytes in the same way in several functions below.
  */
 #define	CUIO_SKIP()	do {						\
 	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
 	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
@@ -59,6 +65,17 @@
 		iov++;							\
 	}								\
 } while (0)
+#define	CUNMAPPED_SKIP()	do {					\
+	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
+	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
+	while (off > 0) {						\
+		if (off < PAGE_SIZE)					\
+			break;						\
+		processed += PAGE_SIZE;					\
+		off -= PAGE_SIZE;					\
+		pages++;						\
+	}								\
+} while (0)
 
 static void
 cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
@@ -128,6 +145,96 @@
 	return (-1);
 }
 
+#if CRYPTO_HAS_UNMAPPED
+/*
+ * Apply function f to the data in a vm_page_t list starting "off" bytes from
+ * the beginning, continuing for "len" bytes.
+ */
+static int
+cunmapped_apply(vm_page_t *pages, int off, int len,
+    int (*f)(void *, const void *, u_int), void *arg)
+{
+	int processed = 0;
+	unsigned count;
+	int rval;
+
+	CUNMAPPED_SKIP();
+	while (len > 0) {
+		char *kaddr = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages));
+		count = min(PAGE_SIZE - off, len);
+		rval = (*f)(arg, kaddr + off, count);
+		if (rval)
+			return (rval);
+		len -= count;
+		processed += count;
+		off = 0;
+		pages++;
+	}
+	return (0);
+}
+
+static inline void *
+cunmapped_contiguous_segment(vm_page_t *pages, size_t skip, int len)
+{
+	if ((skip + len - 1) / PAGE_SIZE > skip / PAGE_SIZE)
+		return (NULL);
+
+	pages += (skip / PAGE_SIZE);
+	skip -= rounddown(skip, PAGE_SIZE);
+	return (((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages))) + skip);
+}
+
+/*
+ * Copy len bytes of data from the pointer cp into the vm_page_t array,
+ * skipping the first off bytes.  Return the number of bytes skipped and copied.
+ * Does not verify the length of the array.
+ */
+static int
+cunmapped_copyback(vm_page_t *pages, int off, int len, c_caddr_t cp)
+{
+	int processed = 0;
+	unsigned count;
+
+	CUNMAPPED_SKIP();
+	while (len > 0) {
+		count = min(PAGE_SIZE - off, len);
+		bcopy(cp, (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off,
+		    count);
+		len -= count;
+		cp += count;
+		processed += count;
+		off = 0;
+		pages++;
+	}
+	return (processed);
+}
+
+/*
+ * Copy len bytes of data from the vm_page_t array, skipping the first off
+ * bytes, into the pointer cp.  Return the number of bytes skipped and copied.
+ * Does not verify the length of the array.
+ */
+static int
+cunmapped_copydata(vm_page_t *pages, int off, int len, caddr_t cp)
+{
+	int processed = 0;
+	unsigned count;
+
+	CUNMAPPED_SKIP();
+	while (len > 0) {
+		count = min(PAGE_SIZE - off, len);
+		bcopy(((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off), cp,
+		    count);
+		len -= count;
+		cp += count;
+		processed += count;
+		off = 0;
+		pages++;
+	}
+	return (processed);
+}
+#endif /* CRYPTO_HAS_UNMAPPED */
+
 void
 crypto_cursor_init(struct crypto_buffer_cursor *cc,
     const struct crypto_buffer *cb)
@@ -142,6 +249,11 @@
 	case CRYPTO_BUF_MBUF:
 		cc->cc_mbuf = cb->cb_mbuf;
 		break;
+	case CRYPTO_BUF_UNMAPPED:
+		cc->cc_unmapped = cb->cb_unmapped;
+		cc->cc_buf_len = cb->cb_unmapped_len;
+		cc->cc_offset = cb->cb_unmapped_offset;
+		break;
 	case CRYPTO_BUF_UIO:
 		cc->cc_iov = cb->cb_uio->uio_iov;
 		break;
@@ -178,6 +290,21 @@
 			break;
 		}
 		break;
+	case CRYPTO_BUF_UNMAPPED:
+		for (;;) {
+			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
+			cc->cc_buf_len -= MIN(remain, amount);
+			if (amount < remain) {
+				cc->cc_offset += amount;
+				break;
+			}
+			amount -= remain;
+			cc->cc_unmapped++;
+			cc->cc_offset = 0;
+			if (amount == 0)
+				break;
+		}
+		break;
 	case CRYPTO_BUF_UIO:
 		for (;;) {
 			remain = cc->cc_iov->iov_len - cc->cc_offset;
@@ -212,6 +339,9 @@
 		KASSERT((cc->cc_mbuf->m_flags & M_EXTPG) == 0,
 		    ("%s: not supported for unmapped mbufs", __func__));
 		return (mtod(cc->cc_mbuf, char *) + cc->cc_offset);
+	case CRYPTO_BUF_UNMAPPED:
+		return ((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
+		    *cc->cc_unmapped)) + cc->cc_offset);
 	case CRYPTO_BUF_UIO:
 		return ((char *)cc->cc_iov->iov_base + cc->cc_offset);
 	default:
@@ -228,6 +358,8 @@
 	switch (cc->cc_type) {
 	case CRYPTO_BUF_CONTIG:
 		return (cc->cc_buf_len);
+	case CRYPTO_BUF_UNMAPPED:
+		return (PAGE_SIZE - cc->cc_offset);
 	case CRYPTO_BUF_MBUF:
 		if (cc->cc_mbuf == NULL)
 			return (0);
@@ -278,6 +410,26 @@
 			break;
 		}
 		break;
+	case CRYPTO_BUF_UNMAPPED:
+		for (;;) {
+			dst = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
+			    *cc->cc_unmapped)) + cc->cc_offset;
+			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
+			todo = MIN(remain, size);
+			memcpy(dst, src, todo);
+			src += todo;
+			cc->cc_buf_len -= todo;
+			if (todo < remain) {
+				cc->cc_offset += todo;
+				break;
+			}
+			size -= todo;
+			cc->cc_unmapped++;
+			cc->cc_offset = 0;
+			if (size == 0)
+				break;
+		}
+		break;
 	case CRYPTO_BUF_UIO:
 		for (;;) {
 			dst = (char *)cc->cc_iov->iov_base + cc->cc_offset;
@@ -339,6 +491,26 @@
 			break;
 		}
 		break;
+	case CRYPTO_BUF_UNMAPPED:
+		for (;;) {
+			src = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
+			    *cc->cc_unmapped)) + cc->cc_offset;
+			remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len);
+			todo = MIN(remain, size);
+			memcpy(dst, src, todo);
+			dst += todo;
+			cc->cc_buf_len -= todo;
+			if (todo < remain) {
+				cc->cc_offset += todo;
+				break;
+			}
+			size -= todo;
+			cc->cc_unmapped++;
+			cc->cc_offset = 0;
+			if (size == 0)
+				break;
+		}
+		break;
 	case CRYPTO_BUF_UIO:
 		for (;;) {
 			src = (const char *)cc->cc_iov->iov_base +
@@ -421,6 +593,15 @@
 	case CRYPTO_BUF_MBUF:
 		m_copyback(cb->cb_mbuf, off, size, src);
 		break;
+#if CRYPTO_HAS_UNMAPPED
+	case CRYPTO_BUF_UNMAPPED:
+		MPASS(size <= cb->cb_unmapped_len);
+		MPASS(size + off <=
+		    cb->cb_unmapped_len + cb->cb_unmapped_offset);
+		cunmapped_copyback(cb->cb_unmapped,
+		    off + cb->cb_unmapped_offset, size, src);
+		break;
+#endif /* CRYPTO_HAS_UNMAPPED */
 	case CRYPTO_BUF_UIO:
 		cuio_copyback(cb->cb_uio, off, size, src);
 		break;
@@ -444,6 +625,15 @@
 	case CRYPTO_BUF_MBUF:
 		m_copydata(crp->crp_buf.cb_mbuf, off, size, dst);
 		break;
+#if CRYPTO_HAS_UNMAPPED
+	case CRYPTO_BUF_UNMAPPED:
+		MPASS(size <= crp->crp_buf.cb_unmapped_len);
+		MPASS(size + off <= crp->crp_buf.cb_unmapped_len +
+		    crp->crp_buf.cb_unmapped_offset);
+		cunmapped_copydata(crp->crp_buf.cb_unmapped,
+		    off + crp->crp_buf.cb_unmapped_offset, size, dst);
+		break;
+#endif /* CRYPTO_HAS_UNMAPPED */
 	case CRYPTO_BUF_UIO:
 		cuio_copydata(crp->crp_buf.cb_uio, off, size, dst);
 		break;
@@ -473,6 +663,12 @@
 	case CRYPTO_BUF_UIO:
 		error = cuio_apply(cb->cb_uio, off, len, f, arg);
 		break;
+#if CRYPTO_HAS_UNMAPPED
+	case CRYPTO_BUF_UNMAPPED:
+		error = cunmapped_apply(cb->cb_unmapped,
+		    off + cb->cb_unmapped_offset, len, f, arg);
+		break;
+#endif /* CRYPTO_HAS_UNMAPPED */
 	case CRYPTO_BUF_CONTIG:
 		MPASS(off + len <= cb->cb_buf_len);
 		error = (*f)(arg, cb->cb_buf + off, len);
@@ -540,6 +736,12 @@
 		return (m_contiguous_subsegment(cb->cb_mbuf, skip, len));
 	case CRYPTO_BUF_UIO:
 		return (cuio_contiguous_segment(cb->cb_uio, skip, len));
+#if CRYPTO_HAS_UNMAPPED
+	case CRYPTO_BUF_UNMAPPED:
+		MPASS(skip + len <= cb->cb_unmapped_len);
+		return (cunmapped_contiguous_segment(cb->cb_unmapped,
+		    skip + cb->cb_unmapped_offset, len));
+#endif /* CRYPTO_HAS_UNMAPPED */
 	case CRYPTO_BUF_CONTIG:
 		MPASS(skip + len <= cb->cb_buf_len);
 		return (cb->cb_buf + skip);
Index: sys/opencrypto/crypto.c
===================================================================
--- sys/opencrypto/crypto.c
+++ sys/opencrypto/crypto.c
@@ -78,7 +80,9 @@
 #include
+#include
 #include
+
 #include
 #include
 #include
@@ -1218,6 +1220,8 @@
 		if (cb->cb_mbuf->m_flags & M_PKTHDR)
 			return (cb->cb_mbuf->m_pkthdr.len);
 		return (m_length(cb->cb_mbuf, NULL));
+	case CRYPTO_BUF_UNMAPPED:
+		return (cb->cb_unmapped_len);
 	case CRYPTO_BUF_UIO:
 		return (cb->cb_uio->uio_resid);
 	default:
@@ -1232,9 +1236,25 @@
 {
 	KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST,
 	    ("incoming crp with invalid %s buffer type", name));
-	if (cb->cb_type == CRYPTO_BUF_CONTIG)
+	switch (cb->cb_type) {
+	case CRYPTO_BUF_CONTIG:
 		KASSERT(cb->cb_buf_len >= 0,
 		    ("incoming crp with -ve %s buffer length", name));
+		break;
+	case CRYPTO_BUF_UNMAPPED:
+		KASSERT(CRYPTO_HAS_UNMAPPED,
+		    ("incoming crp uses unmapped buffers on an arch without DMAP"));
+		KASSERT(cb->cb_unmapped_len >= 0,
+		    ("incoming crp with -ve %s buffer length", name));
+		KASSERT(cb->cb_unmapped_offset >= 0,
+		    ("incoming crp with -ve %s buffer offset", name));
+		KASSERT(cb->cb_unmapped_offset < PAGE_SIZE,
+		    ("incoming crp with %s buffer offset >= page size",
+		    name));
+		break;
+	default:
+		break;
+	}
 }
 
 static void
Index: sys/opencrypto/cryptodev.h
===================================================================
--- sys/opencrypto/cryptodev.h
+++ sys/opencrypto/cryptodev.h
@@ -205,6 +205,9 @@
 #define	CRYPTO_FLAG_HARDWARE	0x01000000	/* hardware accelerated */
 #define	CRYPTO_FLAG_SOFTWARE	0x02000000	/* software implementation */
 
+/* Does crypto support unmapped buffers on this platform? */
+#define	CRYPTO_HAS_UNMAPPED	(PMAP_HAS_DMAP)
+
 /* NB: deprecated */
 struct session_op {
	u_int32_t	cipher;		/* ie. CRYPTO_AES_CBC */
@@ -387,7 +390,8 @@
 	CRYPTO_BUF_CONTIG,
 	CRYPTO_BUF_UIO,
 	CRYPTO_BUF_MBUF,
-	CRYPTO_BUF_LAST = CRYPTO_BUF_MBUF
+	CRYPTO_BUF_UNMAPPED,
+	CRYPTO_BUF_LAST = CRYPTO_BUF_UNMAPPED
 };
 
 /*
@@ -402,6 +406,11 @@
 			int	cb_buf_len;
 		};
 		struct mbuf *cb_mbuf;
+		struct {
+			vm_page_t	*cb_unmapped;
+			int		cb_unmapped_len;
+			int		cb_unmapped_offset;
+		};
 		struct uio *cb_uio;
 	};
 	enum crypto_buffer_type cb_type;
@@ -415,11 +424,15 @@
 		char *cc_buf;
 		struct mbuf *cc_mbuf;
 		struct iovec *cc_iov;
+		vm_page_t *cc_unmapped;
 	};
-	union {
-		int cc_buf_len;
-		size_t cc_offset;
-	};
+	/* Bytes of valid data remaining; not used by every buffer type */
+	int cc_buf_len;
+	/*
+	 * Offset within the current buffer segment at which valid data
+	 * begins; not used by every buffer type
+	 */
+	size_t cc_offset;
 	enum crypto_buffer_type cc_type;
 };
@@ -509,6 +522,16 @@
 }
 
 static __inline void
+_crypto_use_unmapped(struct crypto_buffer *cb, vm_page_t *pages, int len,
+    int offset)
+{
+	cb->cb_unmapped = pages;
+	cb->cb_unmapped_len = len;
+	cb->cb_unmapped_offset = offset;
+	cb->cb_type = CRYPTO_BUF_UNMAPPED;
+}
+
+static __inline void
 _crypto_use_uio(struct crypto_buffer *cb, struct uio *uio)
 {
 	cb->cb_uio = uio;
@@ -528,6 +551,13 @@
 }
 
 static __inline void
+crypto_use_unmapped(struct cryptop *crp, vm_page_t *pages, int len,
+    int offset)
+{
+	_crypto_use_unmapped(&crp->crp_buf, pages, len, offset);
+}
+
+static __inline void
 crypto_use_uio(struct cryptop *crp, struct uio *uio)
 {
 	_crypto_use_uio(&crp->crp_buf, uio);
@@ -543,6 +573,13 @@
 crypto_use_output_mbuf(struct cryptop *crp, struct mbuf *m)
 {
 	_crypto_use_mbuf(&crp->crp_obuf, m);
+}
+
+static __inline void
+crypto_use_output_unmapped(struct cryptop *crp, vm_page_t *pages, int len,
+    int offset)
+{
+	_crypto_use_unmapped(&crp->crp_obuf, pages, len, offset);
 }
 
 static __inline void
Index: sys/opencrypto/cryptosoft.c
===================================================================
--- sys/opencrypto/cryptosoft.c
+++ sys/opencrypto/cryptosoft.c
@@ -980,6 +980,10 @@
 			}
 		}
 		break;
+	case CRYPTO_BUF_UNMAPPED:
+		adj = crp->crp_payload_length - result;
+		crp->crp_buf.cb_unmapped_len -= adj;
+		break;
 	default:
 		break;
 	}
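
Reviewer's note (not part of the patch): a minimal consumer-side sketch of the interface added above, assuming an already-established OCF session.  The function name, callback, and error handling are hypothetical illustrations; only crypto_getreq(), crypto_use_unmapped(), CRYPTO_HAS_UNMAPPED, and crypto_dispatch() come from this patch or from existing OCF code.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <opencrypto/cryptodev.h>

/* Hypothetical completion callback; OCF invokes it when the request finishes. */
static int example_done(struct cryptop *crp);

/*
 * Submit an encryption request whose payload is described by an array of
 * vm_page_t pointers instead of a kernel virtual address.
 */
static int
example_encrypt_pages(crypto_session_t sid, vm_page_t *pages, int len,
    int offset)
{
	struct cryptop *crp;

	if (!CRYPTO_HAS_UNMAPPED)
		return (EOPNOTSUPP);	/* no direct map on this architecture */

	crp = crypto_getreq(sid, M_WAITOK);
	/* Point the request at the backing pages; no KVA mapping is created. */
	crypto_use_unmapped(crp, pages, len, offset);
	crp->crp_op = CRYPTO_OP_ENCRYPT;
	crp->crp_payload_start = 0;
	crp->crp_payload_length = len;
	crp->crp_callback = example_done;
	return (crypto_dispatch(crp));
}

Whether a driver benefits depends on its buffer handling: DMA-capable drivers load the page array through bus_dmamap_load_crp() or sglist_append_vmpages(), while software crypto walks it through the crypto_cursor_* routines via the direct map, as implemented in the criov.c changes above.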