Changeset View
Standalone View
sys/geom/eli/g_eli_privacy.c
Show All 39 Lines | |||||
#include <sys/sysctl.h> | #include <sys/sysctl.h> | ||||
#include <sys/malloc.h> | #include <sys/malloc.h> | ||||
#include <sys/kthread.h> | #include <sys/kthread.h> | ||||
#include <sys/proc.h> | #include <sys/proc.h> | ||||
#include <sys/sched.h> | #include <sys/sched.h> | ||||
#include <sys/smp.h> | #include <sys/smp.h> | ||||
#include <sys/vnode.h> | #include <sys/vnode.h> | ||||
#include <machine/vmparam.h> | |||||
#include <vm/uma.h> | #include <vm/uma.h> | ||||
#include <geom/geom.h> | #include <geom/geom.h> | ||||
#include <geom/geom_dbg.h> | #include <geom/geom_dbg.h> | ||||
#include <geom/eli/g_eli.h> | #include <geom/eli/g_eli.h> | ||||
#include <geom/eli/pkcs5v2.h> | #include <geom/eli/pkcs5v2.h> | ||||
/* | /* | ||||
* Code paths: | * Code paths: | ||||
* BIO_READ: | * BIO_READ: | ||||
* g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver | * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver | ||||
* BIO_WRITE: | * BIO_WRITE: | ||||
* g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver | * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver | ||||
*/ | */ | ||||
MALLOC_DECLARE(M_ELI); | MALLOC_DECLARE(M_ELI); | ||||
/* | /* | ||||
* Copy data from a (potentially unmapped) bio to a kernelspace buffer. | |||||
* | |||||
* The buffer must have at least as much room as bp->bio_length. | |||||
*/ | |||||
#if PMAP_HAS_DMAP | |||||
static void | |||||
g_eli_bio_copyin(struct bio *bp, void *kaddr) | |||||
{ | |||||
struct uio uio; | |||||
struct iovec iov[1]; | |||||
iov[0].iov_base = kaddr; | |||||
iov[0].iov_len = bp->bio_length; | |||||
uio.uio_iov = iov; | |||||
uio.uio_iovcnt = 1; | |||||
uio.uio_offset = 0; | |||||
uio.uio_resid = bp->bio_length; | |||||
uio.uio_segflg = UIO_SYSSPACE; | |||||
uio.uio_rw = UIO_READ; | |||||
uiomove_fromphys(bp->bio_ma, bp->bio_ma_offset, bp->bio_length, &uio); | |||||
} | |||||
#endif | |||||
/* | |||||
* The function is called after we read and decrypt data. | * The function is called after we read and decrypt data. | ||||
* | * | ||||
* g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> G_ELI_CRYPTO_READ_DONE -> g_io_deliver | * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> G_ELI_CRYPTO_READ_DONE -> g_io_deliver | ||||
*/ | */ | ||||
static int | static int | ||||
g_eli_crypto_read_done(struct cryptop *crp) | g_eli_crypto_read_done(struct cryptop *crp) | ||||
{ | { | ||||
struct g_eli_softc *sc; | struct g_eli_softc *sc; | ||||
Show All 19 Lines | g_eli_crypto_read_done(struct cryptop *crp) | ||||
if (sc != NULL && crp->crp_cipher_key != NULL) | if (sc != NULL && crp->crp_cipher_key != NULL) | ||||
g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key)); | g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key)); | ||||
crypto_freereq(crp); | crypto_freereq(crp); | ||||
/* | /* | ||||
* Do we have all sectors already? | * Do we have all sectors already? | ||||
*/ | */ | ||||
if (bp->bio_inbed < bp->bio_children) | if (bp->bio_inbed < bp->bio_children) | ||||
return (0); | return (0); | ||||
free(bp->bio_driver2, M_ELI); | |||||
bp->bio_driver2 = NULL; | |||||
if (bp->bio_error != 0) { | if (bp->bio_error != 0) { | ||||
G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).", | G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).", | ||||
bp->bio_error); | bp->bio_error); | ||||
bp->bio_completed = 0; | bp->bio_completed = 0; | ||||
} | } | ||||
/* | /* | ||||
* Read is finished, send it up. | * Read is finished, send it up. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 51 Lines • ▼ Show 20 Lines | if (bp->bio_error != 0) { | ||||
free(bp->bio_driver2, M_ELI); | free(bp->bio_driver2, M_ELI); | ||||
bp->bio_driver2 = NULL; | bp->bio_driver2 = NULL; | ||||
g_destroy_bio(cbp); | g_destroy_bio(cbp); | ||||
g_io_deliver(bp, bp->bio_error); | g_io_deliver(bp, bp->bio_error); | ||||
atomic_subtract_int(&sc->sc_inflight, 1); | atomic_subtract_int(&sc->sc_inflight, 1); | ||||
return (0); | return (0); | ||||
} | } | ||||
cbp->bio_data = bp->bio_driver2; | cbp->bio_data = bp->bio_driver2; | ||||
cbp->bio_flags &= ~BIO_UNMAPPED; | |||||
kib: Why do you clear the flag? | |||||
asomersAuthorUnsubmitted Done Inline ActionsBecause during writes, g_eli_crypto_run mallocs a new buffer and points bp->bio_data to it. So the bio is mapped now if it wasn't before. asomers: Because during writes, `g_eli_crypto_run` mallocs a new buffer and points `bp->bio_data` to it. | |||||
kibUnsubmitted Done Inline ActionsEither this should be stated as comment, or BIO_UNMAPPED cleared at the point where you reset bio_data, or both. kib: Either this should be stated as comment, or BIO_UNMAPPED cleared at the point where you reset… | |||||
cbp->bio_done = g_eli_write_done; | cbp->bio_done = g_eli_write_done; | ||||
Done Inline ActionsThis line looks like it deserves a comment. Isn't it true that bio_driver2 != NULL if and only if the BIO is unmapped? If so, it seems clearer to check for the latter condition. markj: This line looks like it deserves a comment.
Isn't it true that bio_driver2 != NULL if and only… | |||||
cp = LIST_FIRST(&gp->consumer); | cp = LIST_FIRST(&gp->consumer); | ||||
cbp->bio_to = cp->provider; | cbp->bio_to = cp->provider; | ||||
G_ELI_LOGREQ(2, cbp, "Sending request."); | G_ELI_LOGREQ(2, cbp, "Sending request."); | ||||
/* | /* | ||||
* Send encrypted data to the provider. | * Send encrypted data to the provider. | ||||
*/ | */ | ||||
g_io_request(cbp, cp); | g_io_request(cbp, cp); | ||||
return (0); | return (0); | ||||
▲ Show 20 Lines • Show All 54 Lines • ▼ Show 20 Lines | |||||
*/ | */ | ||||
void | void | ||||
g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp) | g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp) | ||||
{ | { | ||||
struct g_eli_softc *sc; | struct g_eli_softc *sc; | ||||
struct cryptop *crp; | struct cryptop *crp; | ||||
u_int i, nsec, secsize; | u_int i, nsec, secsize; | ||||
off_t dstoff; | off_t dstoff; | ||||
u_char *data; | u_char *data = NULL; | ||||
int error; | int error; | ||||
#if PMAP_HAS_DMAP | |||||
vm_page_t *pages; | |||||
int pages_offset; | |||||
#endif /* PMAP_HAS_DMAP */ | |||||
G_ELI_LOGREQ(3, bp, "%s", __func__); | G_ELI_LOGREQ(3, bp, "%s", __func__); | ||||
bp->bio_pflags = wr->w_number; | bp->bio_pflags = wr->w_number; | ||||
sc = wr->w_softc; | sc = wr->w_softc; | ||||
secsize = LIST_FIRST(&sc->sc_geom->provider)->sectorsize; | secsize = LIST_FIRST(&sc->sc_geom->provider)->sectorsize; | ||||
nsec = bp->bio_length / secsize; | nsec = bp->bio_length / secsize; | ||||
bp->bio_inbed = 0; | bp->bio_inbed = 0; | ||||
bp->bio_children = nsec; | bp->bio_children = nsec; | ||||
/* | /* | ||||
* If we write the data we cannot destroy current bio_data content, | * If we write the data we cannot destroy current bio_data content, | ||||
* so we need to allocate more memory for encrypted data. | * so we need to allocate more memory for encrypted data. | ||||
*/ | */ | ||||
if (bp->bio_cmd == BIO_WRITE) { | if (bp->bio_cmd == BIO_WRITE) { | ||||
data = malloc(bp->bio_length, M_ELI, M_WAITOK); | data = malloc(bp->bio_length, M_ELI, M_WAITOK); | ||||
bp->bio_driver2 = data; | bp->bio_driver2 = data; | ||||
/* | |||||
* This copy could be eliminated by using crypto's output | |||||
* buffer, instead of using a single overwriting buffer. | |||||
*/ | |||||
#if PMAP_HAS_DMAP | |||||
if ((bp->bio_flags & BIO_UNMAPPED) != 0) | |||||
g_eli_bio_copyin(bp, data); | |||||
else | |||||
#endif /* PMAP_HAS_DMAP */ | |||||
bcopy(bp->bio_data, data, bp->bio_length); | bcopy(bp->bio_data, data, bp->bio_length); | ||||
} else { | |||||
#if PMAP_HAS_DMAP | |||||
if ((bp->bio_flags & BIO_UNMAPPED) != 0) { | |||||
pages = bp->bio_ma; | |||||
pages_offset = bp->bio_ma_offset; | |||||
} else | } else | ||||
#endif /* PMAP_HAS_DMAP */ | |||||
data = bp->bio_data; | data = bp->bio_data; | ||||
} | |||||
for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) { | for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) { | ||||
crp = crypto_getreq(wr->w_sid, M_WAITOK); | crp = crypto_getreq(wr->w_sid, M_WAITOK); | ||||
Done Inline ActionsNote that this malloc(9) is not needed on DMAP arches, it only adds overhead. I probably should suggest this explicitly: use unmapped path only on DMAP arches, leave current transient remapping when DMAP is not available. kib: Note that this malloc(9) is not needed on DMAP arches, it only adds overhead.
Generally, malloc… | |||||
Done Inline ActionsSo you think that sf_buf would be too slow on non-DMAP arches to be worth using? Ok, that's easy. But why do you say that this malloc is not needed? Are you suggesting that crypto(9) work on an array of vm_page pointers instead or even on a struct bio ? asomers: So you think that sf_buf would be too slow on non-DMAP arches to be worth using? Ok, that's… | |||||
Done Inline ActionsRather I think that non-DMAP arches are already slow, and make the efforts to speed up DMAP arches too hard. I am saying that malloc() is not needed because you already have the bio_ma array and on DMAP arches it is same as the array of sf_bufs. Crypto(9) operating directly on array of vm_pages is the best option, I think. At least, when I worked on unmapped io and busdma, directly taking array of vm_pages was the option that required the minimum amount of plumbing comparing with any other case. kib: Rather I think that non-DMAP arches are already slow, and make the efforts to speed up DMAP… | |||||
if (data) { | |||||
crypto_use_buf(crp, data, secsize); | crypto_use_buf(crp, data, secsize); | ||||
crp->crp_opaque = (void *)bp; | |||||
data += secsize; | data += secsize; | ||||
} else { | |||||
MPASS2(PMAP_HAS_DMAP, | |||||
Done Inline ActionsIt is not safe to allocate more than one sf buf with blocking. Imagine that there are N total sf bufs in the system, and you have N threads each trying to allocate 2 sf bufs in blocking mode. The resulting deadlock is real. For instance, the sendfile code only blocks for the first sf buf allocation; all later allocations are non-blocking and the code adapts to possible failure. Of course this is only relevant for arches without a direct map. kib: It is not safe to allocate more than one sf buf with blocking. Imagine that there are N total… | |||||
Done Inline ActionsSo I need to ensure that any geli worker thread will only block on allocation for up to 1 buffer at a time. Should that be one buffer for the entire thread, or just one buffer for each bio? asomers: So I need to ensure that any geli worker thread will only block on allocation for up to 1… | |||||
Done Inline ActionsNevermind, I'm going to do something else instead. But I need to build an x86 head VM first to test it. Don't expect new code until tomorrow. asomers: Nevermind, I'm going to do something else instead. But I need to build an x86 head VM first to… | |||||
("geli can't use unmapped I/O on non-dmap arches")); | |||||
#if PMAP_HAS_DMAP | |||||
MPASS(pages != NULL); | |||||
crypto_use_vm_page(crp, pages, secsize, pages_offset); | |||||
pages_offset += secsize; | |||||
pages += pages_offset >> PAGE_SHIFT; | |||||
pages_offset &= PAGE_MASK; | |||||
#endif /* PMAP_HAS_DMAP */ | |||||
} | |||||
crp->crp_opaque = (void *)bp; | |||||
if (bp->bio_cmd == BIO_WRITE) { | if (bp->bio_cmd == BIO_WRITE) { | ||||
crp->crp_op = CRYPTO_OP_ENCRYPT; | crp->crp_op = CRYPTO_OP_ENCRYPT; | ||||
crp->crp_callback = g_eli_crypto_write_done; | crp->crp_callback = g_eli_crypto_write_done; | ||||
} else /* if (bp->bio_cmd == BIO_READ) */ { | } else /* if (bp->bio_cmd == BIO_READ) */ { | ||||
crp->crp_op = CRYPTO_OP_DECRYPT; | crp->crp_op = CRYPTO_OP_DECRYPT; | ||||
crp->crp_callback = g_eli_crypto_read_done; | crp->crp_callback = g_eli_crypto_read_done; | ||||
} | } | ||||
crp->crp_flags = CRYPTO_F_CBIFSYNC; | crp->crp_flags = CRYPTO_F_CBIFSYNC; | ||||
Show All 18 Lines |
Why do you clear the flag ?