Index: sys/geom/eli/g_eli.c
===================================================================
--- sys/geom/eli/g_eli.c
+++ sys/geom/eli/g_eli.c
@@ -972,6 +972,13 @@
 	 */
 	pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
 	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
+	/*
+	 * Don't use unmapped IO with data integrity verification.  That module
+	 * needs much more intimate access to the bio's data in order to
+	 * add/remove HMAC fields, and split bios that exceed MAXPHYS.
+	 */
+	if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
+		pp->flags |= G_PF_ACCEPT_UNMAPPED;
 	pp->mediasize = sc->sc_mediasize;
 	pp->sectorsize = sc->sc_sectorsize;
 	LIST_FOREACH(gap, &bpp->aliases, ga_next)
Index: sys/geom/eli/g_eli_privacy.c
===================================================================
--- sys/geom/eli/g_eli_privacy.c
+++ sys/geom/eli/g_eli_privacy.c
@@ -98,8 +98,13 @@
 	 */
	if (bp->bio_inbed < bp->bio_children)
 		return (0);
-	free(bp->bio_driver2, M_ELI);
-	bp->bio_driver2 = NULL;
+	MPASS(((bp->bio_flags & BIO_UNMAPPED) != 0) ==
+	    (bp->bio_driver2 != NULL));
+	if (bp->bio_driver2 != NULL) {
+		g_io_bio_copyout(bp->bio_driver2, bp, sc->sc_cpubind);
+		free(bp->bio_driver2, M_ELI);
+		bp->bio_driver2 = NULL;
+	}
 	if (bp->bio_error != 0) {
 		G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).",
 		    bp->bio_error);
@@ -166,7 +171,10 @@
 		atomic_subtract_int(&sc->sc_inflight, 1);
 		return (0);
 	}
-	cbp->bio_data = bp->bio_driver2;
+	if (bp->bio_driver2 != NULL) {
+		cbp->bio_data = bp->bio_driver2;
+		cbp->bio_flags &= ~BIO_UNMAPPED;
+	}
 	cbp->bio_done = g_eli_write_done;
 	cp = LIST_FIRST(&gp->consumer);
 	cbp->bio_to = cp->provider;
@@ -258,9 +266,20 @@
 	if (bp->bio_cmd == BIO_WRITE) {
 		data = malloc(bp->bio_length, M_ELI, M_WAITOK);
 		bp->bio_driver2 = data;
-		bcopy(bp->bio_data, data, bp->bio_length);
-	} else
-		data = bp->bio_data;
+		if ((bp->bio_flags & BIO_UNMAPPED) != 0)
+			g_io_bio_copyin(bp, data, sc->sc_cpubind);
+		else
+			bcopy(bp->bio_data, data, bp->bio_length);
+	} else {
+		if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
+			data = malloc(bp->bio_length, M_ELI, M_WAITOK);
+			bp->bio_driver2 = data;
+			g_io_bio_copyin(bp, data, sc->sc_cpubind);
+		} else {
+			bp->bio_driver2 = NULL;
+			data = bp->bio_data;
+		}
+	}
 	for (i = 0, dstoff = bp->bio_offset; i < nsec; i++,
 	    dstoff += secsize) {
 		crp = crypto_getreq(wr->w_sid, M_WAITOK);
Index: sys/geom/geom.h
===================================================================
--- sys/geom/geom.h
+++ sys/geom/geom.h
@@ -330,6 +330,8 @@
 struct bio * g_clone_bio(struct bio *);
 struct bio * g_duplicate_bio(struct bio *);
 void g_destroy_bio(struct bio *);
+void g_io_bio_copyin(struct bio *bp, void *kaddr, bool pinned);
+void g_io_bio_copyout(void *kaddr, struct bio *bp, bool pinned);
 void g_io_deliver(struct bio *bp, int error);
 int g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr);
 int g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp);
Index: sys/geom/geom_io.c
===================================================================
--- sys/geom/geom_io.c
+++ sys/geom/geom_io.c
@@ -50,6 +50,7 @@
 #include 
 #include 
 #include 
+#include <sys/sf_buf.h>
 #include 
 #include 
 #include 
@@ -741,6 +742,75 @@
 SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
     &inflight_transient_maps, 0,
     "Current count of the active transient maps");
+
+static void
+g_io_bio_copy(struct bio *bp, void *kaddr, bool pinned, bool in)
+{
+	size_t dofs = 0;
+	size_t dlen = bp->bio_length;
+	int i, sf_flags;
+
+	if (pinned)
+		sf_flags = SFB_CPUPRIVATE;
+	else
+		sf_flags = 0;
+
+	for ( i = 0; i < bp->bio_ma_n; i++ ) {
+		struct sf_buf *sfb;
+		vm_page_t m;
+		char *vm;
+		size_t l;
+		int flags;
+
+		flags = VM_ALLOC_WAITOK | VM_ALLOC_WIRED;
+		m = vm_page_grab_unlocked(
+		    bp->bio_ma[i]->object,
+		    bp->bio_ma[i]->pindex,
+		    flags);
+		MPASS(m != NULL);
+		sfb = sf_buf_alloc(m, sf_flags);
+		MPASS(sfb != NULL);
+		vm = (char*)sf_buf_kva(sfb);
+		l = MIN(dlen, PAGE_SIZE);
+		if (i == 0) {
+			vm += bp->bio_ma_offset;
+			l -= bp->bio_ma_offset;
+		}
+		if (in)
+			bcopy(vm, (char*)kaddr + dofs, l);
+		else
+			bcopy((char*)kaddr + dofs, vm, l);
+		/* XXX is vm_page_xunbusy really correct ? */
+		vm_page_xunbusy(m);
+		sf_buf_free(sfb);
+		dlen -= l;
+		dofs += l;
+	}
+}
+
+/*
+ * Copy data from a (potentially unmapped) bio to a kernelspace buffer.
+ *
+ * The buffer must have at least as much room as bp->bio_length.
+ * If the thread is pinned to a single CPU, set pinned=true
+ */
+void
+g_io_bio_copyin(struct bio *bp, void *kaddr, bool pinned)
+{
+	g_io_bio_copy(bp, kaddr, pinned, true);
+}
+
+/*
+ * Copy data from a kernelspace buffer to a (potentially unmapped) bio
+ *
+ * The buffer must have at least as much room as bp->bio_length.
+ * If the thread is pinned to a single CPU, set pinned=true
+ */
+void
+g_io_bio_copyout(void *kaddr, struct bio *bp, bool pinned)
+{
+	g_io_bio_copy(bp, kaddr, pinned, false);
+}
 
 static int
 g_io_transient_map_bio(struct bio *bp)