Index: sys/crypto/ccp/ccp.c
===================================================================
--- sys/crypto/ccp/ccp.c
+++ sys/crypto/ccp/ccp.c
@@ -107,6 +107,10 @@
 	case CRYPTO_BUF_CONTIG:
 		error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len);
 		break;
+	case CRYPTO_BUF_SF_BUF:
+		error = sglist_append_sf_buf(sg, cb->cb_sf_buf,
+		    cb->cb_sf_buf_len, cb->cb_sf_buf_offset);
+		break;
 	default:
 		error = EINVAL;
 	}
Index: sys/dev/cxgbe/crypto/t4_crypto.c
===================================================================
--- sys/dev/cxgbe/crypto/t4_crypto.c
+++ sys/dev/cxgbe/crypto/t4_crypto.c
@@ -272,6 +272,10 @@
 	case CRYPTO_BUF_CONTIG:
 		error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len);
 		break;
+	case CRYPTO_BUF_SF_BUF:
+		error = sglist_append_sf_buf(sg, cb->cb_sf_buf,
+		    cb->cb_sf_buf_len, cb->cb_sf_buf_offset);
+		break;
 	default:
 		error = EINVAL;
 	}
Index: sys/dev/sec/sec.c
===================================================================
--- sys/dev/sec/sec.c
+++ sys/dev/sec/sec.c
@@ -851,6 +851,9 @@
 	case CRYPTO_BUF_MBUF:
 		size = m_length(crp->crp_buf.cb_mbuf, NULL);
 		break;
+	case CRYPTO_BUF_SF_BUF:
+		size = PAGE_SIZE - crp->crp_buf.cb_sf_buf_offset;
+		break;
 	default:
 		return (EINVAL);
 	}
Index: sys/geom/eli/g_eli.c
===================================================================
--- sys/geom/eli/g_eli.c
+++ sys/geom/eli/g_eli.c
@@ -972,6 +972,13 @@
 	 */
 	pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
 	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
+	/*
+	 * Don't use unmapped I/O with data integrity verification.  That
+	 * module needs much more intimate access to the bio's data in order
+	 * to add/remove HMAC fields, and to split bios that exceed MAXPHYS.
+	 */
+	if ((sc->sc_flags & G_ELI_FLAG_AUTH) == 0)
+		pp->flags |= G_PF_ACCEPT_UNMAPPED;
 	pp->mediasize = sc->sc_mediasize;
 	pp->sectorsize = sc->sc_sectorsize;
 	LIST_FOREACH(gap, &bpp->aliases, ga_next)
Index: sys/geom/eli/g_eli_privacy.c
===================================================================
--- sys/geom/eli/g_eli_privacy.c
+++ sys/geom/eli/g_eli_privacy.c
@@ -42,6 +42,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
@@ -98,8 +99,18 @@
 	 */
 	if (bp->bio_inbed < bp->bio_children)
 		return (0);
-	free(bp->bio_driver2, M_ELI);
-	bp->bio_driver2 = NULL;
+	MPASS(((bp->bio_flags & BIO_UNMAPPED) != 0) ==
+	    (bp->bio_driver2 != NULL));
+	if (bp->bio_driver2 != NULL) {
+		struct sf_buf **sf_bufs = bp->bio_driver2;
+		int i;
+
+		for (i = 0; i < bp->bio_ma_n; i++)
+			sf_buf_free(sf_bufs[i]);
+		free(bp->bio_driver2, M_ELI);
+		bp->bio_driver2 = NULL;
+	}
+
 	if (bp->bio_error != 0) {
 		G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).",
 		    bp->bio_error);
@@ -167,6 +178,7 @@
 		return (0);
 	}
 	cbp->bio_data = bp->bio_driver2;
+	cbp->bio_flags &= ~BIO_UNMAPPED;
 	cbp->bio_done = g_eli_write_done;
 	cp = LIST_FIRST(&gp->consumer);
 	cbp->bio_to = cp->provider;
@@ -236,10 +248,12 @@
 {
 	struct g_eli_softc *sc;
 	struct cryptop *crp;
+	struct sf_buf **sf_bufs;
 	u_int i, nsec, secsize;
 	off_t dstoff;
-	u_char *data;
+	u_char *data = NULL;
 	int error;
+	int sf_buf_offset;
 
 	G_ELI_LOGREQ(3, bp, "%s", __func__);
 
@@ -258,16 +272,49 @@
 	if (bp->bio_cmd == BIO_WRITE) {
 		data = malloc(bp->bio_length, M_ELI, M_WAITOK);
 		bp->bio_driver2 = data;
-		bcopy(bp->bio_data, data, bp->bio_length);
-	} else
-		data = bp->bio_data;
+		/*
+		 * XXX This copy could be eliminated by using crypto's output
+		 * buffer, instead of using a single overwriting buffer.
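+		 * A possible sketch (untested): wrap bp->bio_ma in sf_bufs,
+		 * as the READ path below already does, pass them to the
+		 * request as its input with crypto_use_sf_buf(), and direct
+		 * the ciphertext into this bounce buffer through the
+		 * request's separate output buffer (crypto_use_output_*),
+		 * so that no copyin of the plaintext is needed.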
+ */ + if ((bp->bio_flags & BIO_UNMAPPED) != 0) + g_io_bio_copyin(bp, data); + else + bcopy(bp->bio_data, data, bp->bio_length); + } else { + if ((bp->bio_flags & BIO_UNMAPPED) != 0) { + int nbufs = bp->bio_ma_n; + int sf_flags = 0; + int i; + if (sc->sc_cpubind) + sf_flags |= SFB_CPUPRIVATE; + sf_bufs = malloc(sizeof(*sf_bufs) * nbufs, + M_ELI, M_WAITOK | M_ZERO); + bp->bio_driver2 = sf_bufs; + for (i = 0; i < nbufs; i++) { + sf_bufs[i] = sf_buf_alloc(bp->bio_ma[i], + sf_flags); + MPASS(sf_bufs[i] != NULL); + } + sf_buf_offset = bp->bio_ma_offset; + } else { + data = bp->bio_data; + } + } + for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) { crp = crypto_getreq(wr->w_sid, M_WAITOK); - crypto_use_buf(crp, data, secsize); + if (data) { + crypto_use_buf(crp, data, secsize); + data += secsize; + } else { + crypto_use_sf_buf(crp, sf_bufs, secsize, sf_buf_offset); + sf_buf_offset += secsize; + sf_bufs += sf_buf_offset >> PAGE_SHIFT; + sf_buf_offset &= PAGE_MASK; + } crp->crp_opaque = (void *)bp; - data += secsize; if (bp->bio_cmd == BIO_WRITE) { crp->crp_op = CRYPTO_OP_ENCRYPT; crp->crp_callback = g_eli_crypto_write_done; Index: sys/geom/geom.h =================================================================== --- sys/geom/geom.h +++ sys/geom/geom.h @@ -330,6 +330,8 @@ struct bio * g_clone_bio(struct bio *); struct bio * g_duplicate_bio(struct bio *); void g_destroy_bio(struct bio *); +void g_io_bio_copyin(struct bio *bp, void *kaddr); +void g_io_bio_copyout(void *kaddr, struct bio *bp); void g_io_deliver(struct bio *bp, int error); int g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr); int g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp); Index: sys/geom/geom_io.c =================================================================== --- sys/geom/geom_io.c +++ sys/geom/geom_io.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include @@ -741,6 +742,48 @@ SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD, &inflight_transient_maps, 0, "Current count of the active transient maps"); + +static void +g_io_bio_copy(struct bio *bp, void *kaddr, bool in) +{ + struct uio uio; + struct iovec iov[1]; + + iov[0].iov_base = kaddr; + iov[0].iov_len = bp->bio_length; + uio.uio_iov = iov; + uio.uio_iovcnt = 1; + uio.uio_offset = 0; + uio.uio_resid = bp->bio_length; + uio.uio_segflg = UIO_SYSSPACE; + if (in) + uio.uio_rw = UIO_READ; + else + uio.uio_rw = UIO_WRITE; + uiomove_fromphys(bp->bio_ma, bp->bio_ma_offset, bp->bio_length, &uio); +} + +/* + * Copy data from a (potentially unmapped) bio to a kernelspace buffer. + * + * The buffer must have at least as much room as bp->bio_length. + */ +void +g_io_bio_copyin(struct bio *bp, void *kaddr) +{ + g_io_bio_copy(bp, kaddr, true); +} + +/* + * Copy data from a kernelspace buffer to a (potentially unmapped) bio + * + * The buffer must have at least as much room as bp->bio_length. 
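+ * The copy is performed through the bio's page list (bp->bio_ma), so the
+ * bio must actually carry one, as unmapped (BIO_UNMAPPED) bios do.  See
+ * g_io_bio_copyin() above for the opposite direction, which g_eli uses for
+ * unmapped writes.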
+ */ +void +g_io_bio_copyout(void *kaddr, struct bio *bp) +{ + g_io_bio_copy(bp, kaddr, false); +} static int g_io_transient_map_bio(struct bio *bp) Index: sys/kern/subr_bus_dma.c =================================================================== --- sys/kern/subr_bus_dma.c +++ sys/kern/subr_bus_dma.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -332,6 +333,32 @@ } /* + * Load a list of sf_buf + */ +static int +_bus_dmamap_load_sf_buf(bus_dma_tag_t dmat, bus_dmamap_t map, + struct sf_buf **sf_buf, size_t len, size_t offset, int *nsegs, int flags) +{ + char *addr; + int error; + + while (len > 0) { + size_t seglen = PAGE_SIZE - offset; + + addr = (char*)sf_buf_kva(*sf_buf) + offset; + error = _bus_dmamap_load_buffer(dmat, map, addr, + seglen, kernel_pmap, flags, NULL, nsegs); + if (error) + break; + offset = 0; + sf_buf++; + len -= seglen; + } + + return (error); +} + +/* * Load a uio. */ static int @@ -661,6 +688,9 @@ error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs, flags); break; + case CRYPTO_BUF_SF_BUF: + error = _bus_dmamap_load_sf_buf(dmat, map, cb->cb_sf_buf, + cb->cb_sf_buf_len, cb->cb_sf_buf_offset, &nsegs, flags); default: error = EINVAL; } Index: sys/kern/subr_sglist.c =================================================================== --- sys/kern/subr_sglist.c +++ sys/kern/subr_sglist.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -621,6 +622,37 @@ } } return (0); +} + +int +sglist_append_sf_buf(struct sglist *sg, struct sf_buf **sf_buf, size_t len, + int offset) +{ + struct sgsave save; + size_t done; + int error; + + if (sg->sg_maxseg == 0) + return (EINVAL); + + error = 0; + SGLIST_SAVE(sg, save); + while (len > 0) { + size_t seglen; + + seglen = MIN(len, PAGE_SIZE - offset); + error = _sglist_append_buf(sg, + (char*)sf_buf_kva(*sf_buf) + offset, seglen, NULL, &done); + if (error) { + SGLIST_RESTORE(sg, save); + return (error); + } + offset += done; + sf_buf += offset >> PAGE_SHIFT; + offset &= PAGE_MASK; + len -= done; + } + return (error); } /* Index: sys/opencrypto/criov.c =================================================================== --- sys/opencrypto/criov.c +++ sys/opencrypto/criov.c @@ -40,12 +40,14 @@ #include #include #include +#include +#include #include /* - * This macro is only for avoiding code duplication, as we need to skip - * given number of bytes in the same way in three functions below. + * These macros are only for avoiding code duplication, as we need to skip + * given number of bytes in the same way in several functions below. */ #define CUIO_SKIP() do { \ KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \ @@ -59,6 +61,17 @@ iov++; \ } \ } while (0) +#define SF_BUF_SKIP() do { \ + KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \ + KASSERT(len >= 0, ("%s: len %d < 0", __func__, len)); \ + while (off > 0) { \ + if (off < PAGE_SIZE) \ + break; \ + processed += PAGE_SIZE - off; \ + off -= PAGE_SIZE - off; \ + sf_buf++; \ + } \ +} while (0) static void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp) @@ -128,6 +141,91 @@ return (-1); } +/* + * Apply function f to the data in an sf_buf list starting "off" bytes from + * the beginning, continuing for "len" bytes. 
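+ * Mirrors cuio_apply() and m_apply(): f is invoked once per contiguous page
+ * segment, and a non-zero return value from f stops the walk and is returned
+ * to the caller.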
+ */ +static int +sf_buf_apply(struct sf_buf **sf_buf, int off, int len, + int (*f)(void *, const void *, u_int), void *arg) +{ + int processed = 0; + unsigned count; + int rval; + + SF_BUF_SKIP(); + while (len > 0) { + count = min(PAGE_SIZE - off, len); + rval = (*f)(arg, (char*)sf_buf_kva(*sf_buf) + off, count); + if (rval) + return (rval); + len -= count; + processed += count; + off = 0; + sf_buf++; + } + return (0); +} + +static inline void * +sf_buf_contiguous_segment(struct sf_buf **sf_buf, size_t skip, int len) +{ + if ((skip + len - 1) / PAGE_SIZE > skip / PAGE_SIZE) + return (NULL); + + sf_buf += (skip / PAGE_SIZE); + skip -= rounddown(skip, PAGE_SIZE); + return ((char*)sf_buf_kva(*sf_buf) + skip); +} + +/* + * Copy len bytes of data from the sf_buf array, skipping the first off bytes, + * into the pointer cp. Return the number of bytes skipped and copied. Does + * not verify the length of the sf_buf array. + */ +static int +sf_buf_copyback(struct sf_buf **sf_buf, int off, int len, c_caddr_t cp) +{ + int processed = 0; + unsigned count; + + SF_BUF_SKIP(); + while (len > 0) { + count = min(PAGE_SIZE - off, len); + bcopy(cp, (char*)sf_buf_kva(*sf_buf) + off, count); + len -= count; + cp += count; + processed += count; + off = 0; + sf_buf++; + } + return processed; +} + +/* + * Copy len bytes of data from the pointer cp into the sf_buf array, skipping + * the first off bytes, Return the number of bytes skipped and copied. Does + * not verify the length of the sf_buf array. + */ +static int +sf_buf_copydata(struct sf_buf **sf_buf, int off, int len, caddr_t cp) +{ + int processed = 0; + unsigned count; + + SF_BUF_SKIP(); + while (len > 0) { + count = min(PAGE_SIZE - off, len); + bcopy((void*)((char*)sf_buf_kva(*sf_buf) + off), cp, count); + len -= count; + cp += count; + processed += count; + off = 0; + sf_buf++; + } + return processed; +} + void crypto_cursor_init(struct crypto_buffer_cursor *cc, const struct crypto_buffer *cb) @@ -142,6 +240,11 @@ case CRYPTO_BUF_MBUF: cc->cc_mbuf = cb->cb_mbuf; break; + case CRYPTO_BUF_SF_BUF: + cc->cc_sf_buf = cb->cb_sf_buf; + cc->cc_buf_len = cb->cb_sf_buf_len; + cc->cc_offset = cb->cb_sf_buf_offset; + break; case CRYPTO_BUF_UIO: cc->cc_iov = cb->cb_uio->uio_iov; break; @@ -178,6 +281,21 @@ break; } break; + case CRYPTO_BUF_SF_BUF: + for (;;) { + remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len); + cc->cc_buf_len -= amount; + if (amount < remain) { + cc->cc_offset += amount; + break; + } + amount -= remain; + cc->cc_sf_buf++; + cc->cc_offset = 0; + if (amount == 0) + break; + } + break; case CRYPTO_BUF_UIO: for (;;) { remain = cc->cc_iov->iov_len - cc->cc_offset; @@ -203,6 +321,8 @@ void * crypto_cursor_segbase(struct crypto_buffer_cursor *cc) { + char *sf_kva; + switch (cc->cc_type) { case CRYPTO_BUF_CONTIG: return (cc->cc_buf); @@ -212,6 +332,9 @@ KASSERT((cc->cc_mbuf->m_flags & M_EXTPG) == 0, ("%s: not supported for unmapped mbufs", __func__)); return (mtod(cc->cc_mbuf, char *) + cc->cc_offset); + case CRYPTO_BUF_SF_BUF: + sf_kva = (char*)sf_buf_kva(*cc->cc_sf_buf); + return (void*)(sf_kva + cc->cc_offset); case CRYPTO_BUF_UIO: return ((char *)cc->cc_iov->iov_base + cc->cc_offset); default: @@ -227,7 +350,9 @@ { switch (cc->cc_type) { case CRYPTO_BUF_CONTIG: - return (cc->cc_buf_len); + return cc->cc_buf_len; + case CRYPTO_BUF_SF_BUF: + return (PAGE_SIZE - cc->cc_offset); case CRYPTO_BUF_MBUF: if (cc->cc_mbuf == NULL) return (0); @@ -278,6 +403,25 @@ break; } break; + case CRYPTO_BUF_SF_BUF: + for (;;) { + dst = 
(char*)sf_buf_kva(*cc->cc_sf_buf) + cc->cc_offset; + remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len); + todo = MIN(remain, size); + memcpy(dst, src, todo); + src += todo; + cc->cc_buf_len -= todo; + if (todo < remain) { + cc->cc_offset += todo; + break; + } + size -= todo; + cc->cc_sf_buf++; + cc->cc_offset = 0; + if (size == 0) + break; + } + break; case CRYPTO_BUF_UIO: for (;;) { dst = (char *)cc->cc_iov->iov_base + cc->cc_offset; @@ -339,6 +483,25 @@ break; } break; + case CRYPTO_BUF_SF_BUF: + for (;;) { + src = (char*)sf_buf_kva(*cc->cc_sf_buf) + cc->cc_offset; + remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len); + todo = MIN(remain, size); + memcpy(dst, src, todo); + src += todo; + cc->cc_buf_len -= todo; + if (todo < remain) { + cc->cc_offset += todo; + break; + } + size -= todo; + cc->cc_sf_buf++; + cc->cc_offset = 0; + if (size == 0) + break; + } + break; case CRYPTO_BUF_UIO: for (;;) { src = (const char *)cc->cc_iov->iov_base + @@ -421,6 +584,12 @@ case CRYPTO_BUF_MBUF: m_copyback(cb->cb_mbuf, off, size, src); break; + case CRYPTO_BUF_SF_BUF: + MPASS(size <= cb->cb_sf_buf_len); + MPASS(size + off <= cb->cb_sf_buf_len + cb->cb_sf_buf_offset); + sf_buf_copyback(cb->cb_sf_buf, off + cb->cb_sf_buf_offset, size, + src); + break; case CRYPTO_BUF_UIO: cuio_copyback(cb->cb_uio, off, size, src); break; @@ -444,6 +613,12 @@ case CRYPTO_BUF_MBUF: m_copydata(crp->crp_buf.cb_mbuf, off, size, dst); break; + case CRYPTO_BUF_SF_BUF: + MPASS(size <= crp->crp_buf.cb_sf_buf_len); + MPASS(size + off <= crp->crp_buf.cb_sf_buf_len + crp->crp_buf.cb_sf_buf_offset); + sf_buf_copydata(crp->crp_buf.cb_sf_buf, off + crp->crp_buf.cb_sf_buf_offset, size, + dst); + break; case CRYPTO_BUF_UIO: cuio_copydata(crp->crp_buf.cb_uio, off, size, dst); break; @@ -473,6 +648,10 @@ case CRYPTO_BUF_UIO: error = cuio_apply(cb->cb_uio, off, len, f, arg); break; + case CRYPTO_BUF_SF_BUF: + error = sf_buf_apply(cb->cb_sf_buf, off + cb->cb_sf_buf_offset, + len, f, arg); + break; case CRYPTO_BUF_CONTIG: MPASS(off + len <= cb->cb_buf_len); error = (*f)(arg, cb->cb_buf + off, len); @@ -540,6 +719,10 @@ return (m_contiguous_subsegment(cb->cb_mbuf, skip, len)); case CRYPTO_BUF_UIO: return (cuio_contiguous_segment(cb->cb_uio, skip, len)); + case CRYPTO_BUF_SF_BUF: + MPASS(skip + len <= cb->cb_sf_buf_len); + return (sf_buf_contiguous_segment(cb->cb_sf_buf, + skip + cb->cb_sf_buf_offset, len)); case CRYPTO_BUF_CONTIG: MPASS(skip + len <= cb->cb_buf_len); return (cb->cb_buf + skip); Index: sys/opencrypto/crypto.c =================================================================== --- sys/opencrypto/crypto.c +++ sys/opencrypto/crypto.c @@ -1240,6 +1240,8 @@ if (cb->cb_mbuf->m_flags & M_PKTHDR) return (cb->cb_mbuf->m_pkthdr.len); return (m_length(cb->cb_mbuf, NULL)); + case CRYPTO_BUF_SF_BUF: + return (cb->cb_sf_buf_len); case CRYPTO_BUF_UIO: return (cb->cb_uio->uio_resid); default: @@ -1254,9 +1256,23 @@ { KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST, ("incoming crp with invalid %s buffer type", name)); - if (cb->cb_type == CRYPTO_BUF_CONTIG) + switch (cb->cb_type) { + case CRYPTO_BUF_CONTIG: KASSERT(cb->cb_buf_len >= 0, ("incoming crp with -ve %s buffer length", name)); + break; + case CRYPTO_BUF_SF_BUF: + KASSERT(cb->cb_sf_buf_len >= 0, + ("incoming crp with -ve %s buffer length", name)); + KASSERT(cb->cb_sf_buf_offset >= 0, + ("incoming crp with -ve %s buffer offset", name)); + KASSERT(cb->cb_sf_buf_offset < PAGE_SIZE, + ("incoming crp with %s buffer offset greater than page size" + , name)); 
+ break; + default: + break; + } } static void Index: sys/opencrypto/cryptodev.h =================================================================== --- sys/opencrypto/cryptodev.h +++ sys/opencrypto/cryptodev.h @@ -387,7 +387,8 @@ CRYPTO_BUF_CONTIG, CRYPTO_BUF_UIO, CRYPTO_BUF_MBUF, - CRYPTO_BUF_LAST = CRYPTO_BUF_MBUF + CRYPTO_BUF_SF_BUF, + CRYPTO_BUF_LAST = CRYPTO_BUF_SF_BUF }; /* @@ -402,6 +403,11 @@ int cb_buf_len; }; struct mbuf *cb_mbuf; + struct { + struct sf_buf **cb_sf_buf; + int cb_sf_buf_len; + int cb_sf_buf_offset; + }; struct uio *cb_uio; }; enum crypto_buffer_type cb_type; @@ -415,11 +421,15 @@ char *cc_buf; struct mbuf *cc_mbuf; struct iovec *cc_iov; + struct sf_buf **cc_sf_buf; }; - union { - int cc_buf_len; - size_t cc_offset; - }; + /* Optional bytes of valid data remaining */ + int cc_buf_len; + /* + * Optional offset within the current buffer segment where + * valid data begins + */ + size_t cc_offset; enum crypto_buffer_type cc_type; }; @@ -509,6 +519,16 @@ } static __inline void +_crypto_use_sf_buf(struct crypto_buffer *cb, struct sf_buf **sf_buf, int len, + int offset) +{ + cb->cb_sf_buf = sf_buf; + cb->cb_sf_buf_len = len; + cb->cb_sf_buf_offset = offset; + cb->cb_type = CRYPTO_BUF_SF_BUF; +} + +static __inline void _crypto_use_uio(struct crypto_buffer *cb, struct uio *uio) { cb->cb_uio = uio; @@ -528,6 +548,13 @@ } static __inline void +crypto_use_sf_buf(struct cryptop *crp, struct sf_buf **sf_buf, int len, + int offset) +{ + _crypto_use_sf_buf(&crp->crp_buf, sf_buf, len, offset); +} + +static __inline void crypto_use_uio(struct cryptop *crp, struct uio *uio) { _crypto_use_uio(&crp->crp_buf, uio); @@ -543,6 +570,13 @@ crypto_use_output_mbuf(struct cryptop *crp, struct mbuf *m) { _crypto_use_mbuf(&crp->crp_obuf, m); +} + +static __inline void +crypto_use_output_sf_buf(struct cryptop *crp, struct sf_buf **sf_buf, int len, + int offset) +{ + _crypto_use_sf_buf(&crp->crp_obuf, sf_buf, len, offset); } static __inline void Index: sys/opencrypto/cryptosoft.c =================================================================== --- sys/opencrypto/cryptosoft.c +++ sys/opencrypto/cryptosoft.c @@ -980,6 +980,10 @@ } } break; + case CRYPTO_BUF_SF_BUF: + adj = crp->crp_payload_length - result; + crp->crp_buf.cb_sf_buf_len -= adj; + break; default: break; } Index: sys/sys/sglist.h =================================================================== --- sys/sys/sglist.h +++ sys/sys/sglist.h @@ -57,6 +57,7 @@ struct bio; struct mbuf; +struct sf_buf; struct uio; static __inline void @@ -92,6 +93,8 @@ size_t len); int sglist_append_phys(struct sglist *sg, vm_paddr_t paddr, size_t len); +int sglist_append_sf_buf(struct sglist *sg, struct sf_buf **sf_buf, + size_t len, int offset); int sglist_append_sglist(struct sglist *sg, struct sglist *source, size_t offset, size_t length); int sglist_append_uio(struct sglist *sg, struct uio *uio);
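Usage sketch (illustration only, not part of the patch): one way a consumer
could hand a set of wired pages to the framework with the new
CRYPTO_BUF_SF_BUF type.  The session sid, the page array and my_callback are
hypothetical; sf_buf_alloc(), crypto_getreq(), crp_payload_start and
crypto_dispatch() are existing kernel interfaces.  The sf_buf array is
heap-allocated because it must stay valid until the callback runs, the same
reason g_eli_privacy.c stashes its array in bio_driver2.

/* Assumes <sys/malloc.h>, <sys/sf_buf.h>, <opencrypto/cryptodev.h> and the
 * usual vm headers. */
static int my_callback(struct cryptop *crp);	/* frees the sf_bufs and crp */

static int
example_encrypt_pages(crypto_session_t sid, vm_page_t *pages, int npages,
    int len, int offset)
{
	struct sf_buf **sfs;
	struct cryptop *crp;
	int i;

	/* len must fit in the mapped pages: len <= npages * PAGE_SIZE - offset. */
	sfs = malloc(npages * sizeof(*sfs), M_TEMP, M_WAITOK);
	for (i = 0; i < npages; i++)
		sfs[i] = sf_buf_alloc(pages[i], 0);	/* map each page */

	crp = crypto_getreq(sid, M_WAITOK);
	crypto_use_sf_buf(crp, sfs, len, offset);	/* new buffer type */
	crp->crp_op = CRYPTO_OP_ENCRYPT;
	crp->crp_payload_start = 0;
	crp->crp_payload_length = len;
	crp->crp_opaque = sfs;
	crp->crp_callback = my_callback;
	return (crypto_dispatch(crp));
}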