Index: head/sys/crypto/armv8/armv8_crypto.c =================================================================== --- head/sys/crypto/armv8/armv8_crypto.c (revision 362625) +++ head/sys/crypto/armv8/armv8_crypto.c (revision 362626) @@ -1,385 +1,383 @@ /*- * Copyright (c) 2005-2008 Pawel Jakub Dawidek * Copyright (c) 2010 Konstantin Belousov * Copyright (c) 2014,2016 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This is based on the aesni code. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct armv8_crypto_softc { int dieing; int32_t cid; struct rwlock lock; }; static struct mtx *ctx_mtx; static struct fpu_kern_ctx **ctx_vfp; #define AQUIRE_CTX(i, ctx) \ do { \ (i) = PCPU_GET(cpuid); \ mtx_lock(&ctx_mtx[(i)]); \ (ctx) = ctx_vfp[(i)]; \ } while (0) #define RELEASE_CTX(i, ctx) \ do { \ mtx_unlock(&ctx_mtx[(i)]); \ (i) = -1; \ (ctx) = NULL; \ } while (0) static int armv8_crypto_cipher_process(struct armv8_crypto_session *, struct cryptop *); MALLOC_DEFINE(M_ARMV8_CRYPTO, "armv8_crypto", "ARMv8 Crypto Data"); static void armv8_crypto_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "armv8crypto", -1) == NULL && BUS_ADD_CHILD(parent, 10, "armv8crypto", -1) == 0) panic("ARMv8 crypto: could not attach"); } static int armv8_crypto_probe(device_t dev) { uint64_t reg; int ret = ENXIO; reg = READ_SPECIALREG(id_aa64isar0_el1); switch (ID_AA64ISAR0_AES_VAL(reg)) { case ID_AA64ISAR0_AES_BASE: case ID_AA64ISAR0_AES_PMULL: ret = 0; break; } device_set_desc_copy(dev, "AES-CBC"); /* TODO: Check more fields as we support more features */ return (ret); } static int armv8_crypto_attach(device_t dev) { struct armv8_crypto_softc *sc; int i; sc = device_get_softc(dev); sc->dieing = 0; sc->cid = crypto_get_driverid(dev, sizeof(struct armv8_crypto_session), CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC | CRYPTOCAP_F_ACCEL_SOFTWARE); if (sc->cid < 0) { device_printf(dev, "Could not get crypto driver id.\n"); return (ENOMEM); } rw_init(&sc->lock, "armv8crypto"); ctx_mtx = malloc(sizeof(*ctx_mtx) * (mp_maxid + 1), M_ARMV8_CRYPTO, M_WAITOK|M_ZERO); ctx_vfp = malloc(sizeof(*ctx_vfp) * (mp_maxid + 1), M_ARMV8_CRYPTO, M_WAITOK|M_ZERO); CPU_FOREACH(i) { ctx_vfp[i] = fpu_kern_alloc_ctx(0); mtx_init(&ctx_mtx[i], "armv8cryptoctx", NULL, MTX_DEF|MTX_NEW); } return (0); } static int armv8_crypto_detach(device_t dev) { struct armv8_crypto_softc *sc; int i; sc = device_get_softc(dev); rw_wlock(&sc->lock); sc->dieing = 1; rw_wunlock(&sc->lock); crypto_unregister_all(sc->cid); rw_destroy(&sc->lock); CPU_FOREACH(i) { if (ctx_vfp[i] != NULL) { mtx_destroy(&ctx_mtx[i]); fpu_kern_free_ctx(ctx_vfp[i]); } ctx_vfp[i] = NULL; } free(ctx_mtx, M_ARMV8_CRYPTO); ctx_mtx = NULL; free(ctx_vfp, M_ARMV8_CRYPTO); ctx_vfp = NULL; return (0); } static int armv8_crypto_probesession(device_t dev, const struct crypto_session_params *csp) { if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (csp->csp_ivlen != AES_BLOCK_LEN) return (EINVAL); switch (csp->csp_cipher_klen * 8) { case 128: case 192: case 256: break; default: return (EINVAL); } break; default: return (EINVAL); } default: return (EINVAL); } return (CRYPTODEV_PROBE_ACCEL_SOFTWARE); } static void armv8_crypto_cipher_setup(struct armv8_crypto_session *ses, const struct crypto_session_params *csp) { int i; switch (csp->csp_cipher_klen * 8) { case 128: ses->rounds = AES128_ROUNDS; break; case 192: ses->rounds = AES192_ROUNDS; break; case 256: ses->rounds = AES256_ROUNDS; break; default: panic("invalid CBC key length"); } rijndaelKeySetupEnc(ses->enc_schedule, csp->csp_cipher_key, csp->csp_cipher_klen * 8); rijndaelKeySetupDec(ses->dec_schedule, csp->csp_cipher_key, csp->csp_cipher_klen * 8); for (i = 0; i < 
nitems(ses->enc_schedule); i++) { ses->enc_schedule[i] = bswap32(ses->enc_schedule[i]); ses->dec_schedule[i] = bswap32(ses->dec_schedule[i]); } } static int armv8_crypto_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct armv8_crypto_softc *sc; struct armv8_crypto_session *ses; sc = device_get_softc(dev); rw_wlock(&sc->lock); if (sc->dieing) { rw_wunlock(&sc->lock); return (EINVAL); } ses = crypto_get_driver_session(cses); armv8_crypto_cipher_setup(ses, csp); rw_wunlock(&sc->lock); return (0); } static int armv8_crypto_process(device_t dev, struct cryptop *crp, int hint __unused) { struct armv8_crypto_session *ses; int error; /* We can only handle full blocks for now */ if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0) { error = EINVAL; goto out; } ses = crypto_get_driver_session(crp->crp_session); error = armv8_crypto_cipher_process(ses, crp); out: crp->crp_etype = error; crypto_done(crp); return (error); } static uint8_t * armv8_crypto_cipher_alloc(struct cryptop *crp, int *allocated) { uint8_t *addr; addr = crypto_contiguous_subsegment(crp, crp->crp_payload_start, crp->crp_payload_length); if (addr != NULL) { *allocated = 0; return (addr); } addr = malloc(crp->crp_payload_length, M_ARMV8_CRYPTO, M_NOWAIT); if (addr != NULL) { *allocated = 1; crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, addr); } else *allocated = 0; return (addr); } static int armv8_crypto_cipher_process(struct armv8_crypto_session *ses, struct cryptop *crp) { const struct crypto_session_params *csp; struct fpu_kern_ctx *ctx; uint8_t *buf; uint8_t iv[AES_BLOCK_LEN]; int allocated, i; int encflag; int kt; csp = crypto_get_params(crp->crp_session); encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op); buf = armv8_crypto_cipher_alloc(crp, &allocated); if (buf == NULL) return (ENOMEM); kt = is_fpu_kern_thread(0); if (!kt) { AQUIRE_CTX(i, ctx); fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); } if (crp->crp_cipher_key != NULL) { panic("armv8: new cipher key"); } crypto_read_iv(crp, iv); /* Do work */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (encflag) armv8_aes_encrypt_cbc(ses->rounds, ses->enc_schedule, crp->crp_payload_length, buf, buf, iv); else armv8_aes_decrypt_cbc(ses->rounds, ses->dec_schedule, crp->crp_payload_length, buf, iv); break; } if (allocated) crypto_copyback(crp, crp->crp_payload_start, crp->crp_payload_length, buf); if (!kt) { fpu_kern_leave(curthread, ctx); RELEASE_CTX(i, ctx); } - if (allocated) { - bzero(buf, crp->crp_payload_length); - free(buf, M_ARMV8_CRYPTO); - } + if (allocated) + zfree(buf, M_ARMV8_CRYPTO); return (0); } static device_method_t armv8_crypto_methods[] = { DEVMETHOD(device_identify, armv8_crypto_identify), DEVMETHOD(device_probe, armv8_crypto_probe), DEVMETHOD(device_attach, armv8_crypto_attach), DEVMETHOD(device_detach, armv8_crypto_detach), DEVMETHOD(cryptodev_probesession, armv8_crypto_probesession), DEVMETHOD(cryptodev_newsession, armv8_crypto_newsession), DEVMETHOD(cryptodev_process, armv8_crypto_process), DEVMETHOD_END, }; static DEFINE_CLASS_0(armv8crypto, armv8_crypto_driver, armv8_crypto_methods, sizeof(struct armv8_crypto_softc)); static devclass_t armv8_crypto_devclass; DRIVER_MODULE(armv8crypto, nexus, armv8_crypto_driver, armv8_crypto_devclass, 0, 0); Index: head/sys/crypto/via/padlock_hash.c =================================================================== --- head/sys/crypto/via/padlock_hash.c (revision 362625) +++ head/sys/crypto/via/padlock_hash.c (revision 362626) @@ -1,428 
+1,425 @@ /*- * Copyright (c) 2006 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #if defined(__amd64__) || defined(__i386__) #include #include #include #include #endif #include #include #include #include /* * Implementation notes. * * Some VIA CPUs provides SHA1 and SHA256 acceleration. * We implement all HMAC algorithms provided by crypto(9) framework, but we do * the crypto work in software unless this is HMAC/SHA1 or HMAC/SHA256 and * our CPU can accelerate it. * * Additional CPU instructions, which preform SHA1 and SHA256 are one-shot * functions - we have only one chance to give the data, CPU itself will add * the padding and calculate hash automatically. * This means, it is not possible to implement common init(), update(), final() * methods. * The way I've choosen is to keep adding data to the buffer on update() * (reallocating the buffer if necessary) and call XSHA{1,256} instruction on * final(). 
*/ struct padlock_sha_ctx { uint8_t *psc_buf; int psc_offset; int psc_size; }; CTASSERT(sizeof(struct padlock_sha_ctx) <= sizeof(union authctx)); static void padlock_sha_init(void *vctx); static int padlock_sha_update(void *vctx, const void *buf, u_int bufsize); static void padlock_sha1_final(uint8_t *hash, void *vctx); static void padlock_sha256_final(uint8_t *hash, void *vctx); static struct auth_hash padlock_hmac_sha1 = { .type = CRYPTO_SHA1_HMAC, .name = "HMAC-SHA1", .keysize = SHA1_BLOCK_LEN, .hashsize = SHA1_HASH_LEN, .ctxsize = sizeof(struct padlock_sha_ctx), .blocksize = SHA1_BLOCK_LEN, .Init = padlock_sha_init, .Update = padlock_sha_update, .Final = padlock_sha1_final, }; static struct auth_hash padlock_hmac_sha256 = { .type = CRYPTO_SHA2_256_HMAC, .name = "HMAC-SHA2-256", .keysize = SHA2_256_BLOCK_LEN, .hashsize = SHA2_256_HASH_LEN, .ctxsize = sizeof(struct padlock_sha_ctx), .blocksize = SHA2_256_BLOCK_LEN, .Init = padlock_sha_init, .Update = padlock_sha_update, .Final = padlock_sha256_final, }; MALLOC_DECLARE(M_PADLOCK); static __inline void padlock_output_block(uint32_t *src, uint32_t *dst, size_t count) { while (count-- > 0) *dst++ = bswap32(*src++); } static void padlock_do_sha1(const u_char *in, u_char *out, int count) { u_char buf[128+16]; /* PadLock needs at least 128 bytes buffer. */ u_char *result = PADLOCK_ALIGN(buf); ((uint32_t *)result)[0] = 0x67452301; ((uint32_t *)result)[1] = 0xEFCDAB89; ((uint32_t *)result)[2] = 0x98BADCFE; ((uint32_t *)result)[3] = 0x10325476; ((uint32_t *)result)[4] = 0xC3D2E1F0; #ifdef __GNUCLIKE_ASM __asm __volatile( ".byte 0xf3, 0x0f, 0xa6, 0xc8" /* rep xsha1 */ : "+S"(in), "+D"(result) : "c"(count), "a"(0) ); #endif padlock_output_block((uint32_t *)result, (uint32_t *)out, SHA1_HASH_LEN / sizeof(uint32_t)); } static void padlock_do_sha256(const char *in, char *out, int count) { char buf[128+16]; /* PadLock needs at least 128 bytes buffer. 
*/ char *result = PADLOCK_ALIGN(buf); ((uint32_t *)result)[0] = 0x6A09E667; ((uint32_t *)result)[1] = 0xBB67AE85; ((uint32_t *)result)[2] = 0x3C6EF372; ((uint32_t *)result)[3] = 0xA54FF53A; ((uint32_t *)result)[4] = 0x510E527F; ((uint32_t *)result)[5] = 0x9B05688C; ((uint32_t *)result)[6] = 0x1F83D9AB; ((uint32_t *)result)[7] = 0x5BE0CD19; #ifdef __GNUCLIKE_ASM __asm __volatile( ".byte 0xf3, 0x0f, 0xa6, 0xd0" /* rep xsha256 */ : "+S"(in), "+D"(result) : "c"(count), "a"(0) ); #endif padlock_output_block((uint32_t *)result, (uint32_t *)out, SHA2_256_HASH_LEN / sizeof(uint32_t)); } static void padlock_sha_init(void *vctx) { struct padlock_sha_ctx *ctx; ctx = vctx; ctx->psc_buf = NULL; ctx->psc_offset = 0; ctx->psc_size = 0; } static int padlock_sha_update(void *vctx, const void *buf, u_int bufsize) { struct padlock_sha_ctx *ctx; ctx = vctx; if (ctx->psc_size - ctx->psc_offset < bufsize) { ctx->psc_size = MAX(ctx->psc_size * 2, ctx->psc_size + bufsize); ctx->psc_buf = realloc(ctx->psc_buf, ctx->psc_size, M_PADLOCK, M_NOWAIT); if(ctx->psc_buf == NULL) return (ENOMEM); } bcopy(buf, ctx->psc_buf + ctx->psc_offset, bufsize); ctx->psc_offset += bufsize; return (0); } static void padlock_sha_free(void *vctx) { struct padlock_sha_ctx *ctx; ctx = vctx; if (ctx->psc_buf != NULL) { - //bzero(ctx->psc_buf, ctx->psc_size); - free(ctx->psc_buf, M_PADLOCK); + zfree(ctx->psc_buf, M_PADLOCK); ctx->psc_buf = NULL; ctx->psc_offset = 0; ctx->psc_size = 0; } } static void padlock_sha1_final(uint8_t *hash, void *vctx) { struct padlock_sha_ctx *ctx; ctx = vctx; padlock_do_sha1(ctx->psc_buf, hash, ctx->psc_offset); padlock_sha_free(ctx); } static void padlock_sha256_final(uint8_t *hash, void *vctx) { struct padlock_sha_ctx *ctx; ctx = vctx; padlock_do_sha256(ctx->psc_buf, hash, ctx->psc_offset); padlock_sha_free(ctx); } static void padlock_copy_ctx(struct auth_hash *axf, void *sctx, void *dctx) { if ((via_feature_xcrypt & VIA_HAS_SHA) != 0 && (axf->type == CRYPTO_SHA1_HMAC || axf->type == CRYPTO_SHA2_256_HMAC)) { struct padlock_sha_ctx *spctx = sctx, *dpctx = dctx; dpctx->psc_offset = spctx->psc_offset; dpctx->psc_size = spctx->psc_size; dpctx->psc_buf = malloc(dpctx->psc_size, M_PADLOCK, M_WAITOK); bcopy(spctx->psc_buf, dpctx->psc_buf, dpctx->psc_size); } else { bcopy(sctx, dctx, axf->ctxsize); } } static void padlock_free_ctx(struct auth_hash *axf, void *ctx) { if ((via_feature_xcrypt & VIA_HAS_SHA) != 0 && (axf->type == CRYPTO_SHA1_HMAC || axf->type == CRYPTO_SHA2_256_HMAC)) { padlock_sha_free(ctx); } } static void padlock_hash_key_setup(struct padlock_session *ses, const uint8_t *key, int klen) { struct auth_hash *axf; axf = ses->ses_axf; /* * Try to free contexts before using them, because * padlock_hash_key_setup() can be called twice - once from * padlock_newsession() and again from padlock_process(). */ padlock_free_ctx(axf, ses->ses_ictx); padlock_free_ctx(axf, ses->ses_octx); hmac_init_ipad(axf, key, klen, ses->ses_ictx); hmac_init_opad(axf, key, klen, ses->ses_octx); } /* * Compute keyed-hash authenticator. 
*/ static int padlock_authcompute(struct padlock_session *ses, struct cryptop *crp) { u_char hash[HASH_MAX_LEN], hash2[HASH_MAX_LEN]; struct auth_hash *axf; union authctx ctx; int error; axf = ses->ses_axf; padlock_copy_ctx(axf, ses->ses_ictx, &ctx); error = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, axf->Update, &ctx); if (error != 0) { padlock_free_ctx(axf, &ctx); return (error); } error = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, axf->Update, &ctx); if (error != 0) { padlock_free_ctx(axf, &ctx); return (error); } axf->Final(hash, &ctx); padlock_copy_ctx(axf, ses->ses_octx, &ctx); axf->Update(&ctx, hash, axf->hashsize); axf->Final(hash, &ctx); if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, ses->ses_mlen, hash2); if (timingsafe_bcmp(hash, hash2, ses->ses_mlen) != 0) return (EBADMSG); } else crypto_copyback(crp, crp->crp_digest_start, ses->ses_mlen, hash); return (0); } /* Find software structure which describes HMAC algorithm. */ static struct auth_hash * padlock_hash_lookup(int alg) { struct auth_hash *axf; switch (alg) { case CRYPTO_NULL_HMAC: axf = &auth_hash_null; break; case CRYPTO_SHA1_HMAC: if ((via_feature_xcrypt & VIA_HAS_SHA) != 0) axf = &padlock_hmac_sha1; else axf = &auth_hash_hmac_sha1; break; case CRYPTO_RIPEMD160_HMAC: axf = &auth_hash_hmac_ripemd_160; break; case CRYPTO_SHA2_256_HMAC: if ((via_feature_xcrypt & VIA_HAS_SHA) != 0) axf = &padlock_hmac_sha256; else axf = &auth_hash_hmac_sha2_256; break; case CRYPTO_SHA2_384_HMAC: axf = &auth_hash_hmac_sha2_384; break; case CRYPTO_SHA2_512_HMAC: axf = &auth_hash_hmac_sha2_512; break; default: axf = NULL; break; } return (axf); } bool padlock_hash_check(const struct crypto_session_params *csp) { return (padlock_hash_lookup(csp->csp_auth_alg) != NULL); } int padlock_hash_setup(struct padlock_session *ses, const struct crypto_session_params *csp) { ses->ses_axf = padlock_hash_lookup(csp->csp_auth_alg); if (csp->csp_auth_mlen == 0) ses->ses_mlen = ses->ses_axf->hashsize; else ses->ses_mlen = csp->csp_auth_mlen; /* Allocate memory for HMAC inner and outer contexts. */ ses->ses_ictx = malloc(ses->ses_axf->ctxsize, M_PADLOCK, M_ZERO | M_NOWAIT); ses->ses_octx = malloc(ses->ses_axf->ctxsize, M_PADLOCK, M_ZERO | M_NOWAIT); if (ses->ses_ictx == NULL || ses->ses_octx == NULL) return (ENOMEM); /* Setup key if given. 
*/ if (csp->csp_auth_key != NULL) { padlock_hash_key_setup(ses, csp->csp_auth_key, csp->csp_auth_klen); } return (0); } int padlock_hash_process(struct padlock_session *ses, struct cryptop *crp, const struct crypto_session_params *csp) { struct thread *td; int error; td = curthread; fpu_kern_enter(td, ses->ses_fpu_ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); if (crp->crp_auth_key != NULL) padlock_hash_key_setup(ses, crp->crp_auth_key, csp->csp_auth_klen); error = padlock_authcompute(ses, crp); fpu_kern_leave(td, ses->ses_fpu_ctx); return (error); } void padlock_hash_free(struct padlock_session *ses) { if (ses->ses_ictx != NULL) { padlock_free_ctx(ses->ses_axf, ses->ses_ictx); - bzero(ses->ses_ictx, ses->ses_axf->ctxsize); - free(ses->ses_ictx, M_PADLOCK); + zfree(ses->ses_ictx, M_PADLOCK); ses->ses_ictx = NULL; } if (ses->ses_octx != NULL) { padlock_free_ctx(ses->ses_axf, ses->ses_octx); - bzero(ses->ses_octx, ses->ses_axf->ctxsize); - free(ses->ses_octx, M_PADLOCK); + zfree(ses->ses_octx, M_PADLOCK); ses->ses_octx = NULL; } } Index: head/sys/dev/glxsb/glxsb_hash.c =================================================================== --- head/sys/dev/glxsb/glxsb_hash.c (revision 362625) +++ head/sys/dev/glxsb/glxsb_hash.c (revision 362626) @@ -1,157 +1,155 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include "glxsb.h" /* * Implementation notes. * * The Geode LX Security Block provides AES-128-CBC acceleration. * We implement all HMAC algorithms provided by crypto(9) framework so glxsb can work * with ipsec(4) * * This code was stolen from crypto/via/padlock_hash.c */ MALLOC_DECLARE(M_GLXSB); static void glxsb_hash_key_setup(struct glxsb_session *ses, const char *key, int klen) { struct auth_hash *axf; axf = ses->ses_axf; hmac_init_ipad(axf, key, klen, ses->ses_ictx); hmac_init_opad(axf, key, klen, ses->ses_octx); } /* * Compute keyed-hash authenticator. 
*/ static int glxsb_authcompute(struct glxsb_session *ses, struct cryptop *crp) { u_char hash[HASH_MAX_LEN], hash2[HASH_MAX_LEN]; struct auth_hash *axf; union authctx ctx; int error; axf = ses->ses_axf; bcopy(ses->ses_ictx, &ctx, axf->ctxsize); error = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, axf->Update, &ctx); if (error != 0) return (error); error = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, axf->Update, &ctx); if (error != 0) return (error); axf->Final(hash, &ctx); bcopy(ses->ses_octx, &ctx, axf->ctxsize); axf->Update(&ctx, hash, axf->hashsize); axf->Final(hash, &ctx); /* Verify or inject the authentication data */ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, ses->ses_mlen, hash2); if (timingsafe_bcmp(hash, hash2, ses->ses_mlen) != 0) return (EBADMSG); } else crypto_copyback(crp, crp->crp_digest_start, ses->ses_mlen, hash); return (0); } int glxsb_hash_setup(struct glxsb_session *ses, const struct crypto_session_params *csp) { ses->ses_axf = crypto_auth_hash(csp); if (csp->csp_auth_mlen == 0) ses->ses_mlen = ses->ses_axf->hashsize; else ses->ses_mlen = csp->csp_auth_mlen; /* Allocate memory for HMAC inner and outer contexts. */ ses->ses_ictx = malloc(ses->ses_axf->ctxsize, M_GLXSB, M_ZERO | M_NOWAIT); ses->ses_octx = malloc(ses->ses_axf->ctxsize, M_GLXSB, M_ZERO | M_NOWAIT); if (ses->ses_ictx == NULL || ses->ses_octx == NULL) return (ENOMEM); /* Setup key if given. */ if (csp->csp_auth_key != NULL) { glxsb_hash_key_setup(ses, csp->csp_auth_key, csp->csp_auth_klen); } return (0); } int glxsb_hash_process(struct glxsb_session *ses, const struct crypto_session_params *csp, struct cryptop *crp) { int error; if (crp->crp_auth_key != NULL) glxsb_hash_key_setup(ses, crp->crp_auth_key, csp->csp_auth_klen); error = glxsb_authcompute(ses, crp); return (error); } void glxsb_hash_free(struct glxsb_session *ses) { if (ses->ses_ictx != NULL) { - bzero(ses->ses_ictx, ses->ses_axf->ctxsize); - free(ses->ses_ictx, M_GLXSB); + zfree(ses->ses_ictx, M_GLXSB); ses->ses_ictx = NULL; } if (ses->ses_octx != NULL) { - bzero(ses->ses_octx, ses->ses_axf->ctxsize); - free(ses->ses_octx, M_GLXSB); + zfree(ses->ses_octx, M_GLXSB); ses->ses_octx = NULL; } } Index: head/sys/geom/eli/g_eli.c =================================================================== --- head/sys/geom/eli/g_eli.c (revision 362625) +++ head/sys/geom/eli/g_eli.c (revision 362626) @@ -1,1454 +1,1452 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005-2019 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include FEATURE(geom_eli, "GEOM crypto module"); MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data"); SYSCTL_DECL(_kern_geom); SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "GEOM_ELI stuff"); static int g_eli_version = G_ELI_VERSION; SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0, "GELI version"); int g_eli_debug = 0; SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RWTUN, &g_eli_debug, 0, "Debug level"); static u_int g_eli_tries = 3; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RWTUN, &g_eli_tries, 0, "Number of tries for entering the passphrase"); static u_int g_eli_visible_passphrase = GETS_NOECHO; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RWTUN, &g_eli_visible_passphrase, 0, "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)"); u_int g_eli_overwrites = G_ELI_OVERWRITES; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RWTUN, &g_eli_overwrites, 0, "Number of times on-disk keys should be overwritten when destroying them"); static u_int g_eli_threads = 0; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RWTUN, &g_eli_threads, 0, "Number of threads doing crypto work"); u_int g_eli_batch = 0; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RWTUN, &g_eli_batch, 0, "Use crypto operations batching"); /* * Passphrase cached during boot, in order to be more user-friendly if * there are multiple providers using the same passphrase. */ static char cached_passphrase[256]; static u_int g_eli_boot_passcache = 1; TUNABLE_INT("kern.geom.eli.boot_passcache", &g_eli_boot_passcache); SYSCTL_UINT(_kern_geom_eli, OID_AUTO, boot_passcache, CTLFLAG_RD, &g_eli_boot_passcache, 0, "Passphrases are cached during boot process for possible reuse"); static void fetch_loader_passphrase(void * dummy) { char * env_passphrase; KASSERT(dynamic_kenv, ("need dynamic kenv")); if ((env_passphrase = kern_getenv("kern.geom.eli.passphrase")) != NULL) { /* Extract passphrase from the environment. */ strlcpy(cached_passphrase, env_passphrase, sizeof(cached_passphrase)); freeenv(env_passphrase); /* Wipe the passphrase from the environment. */ kern_unsetenv("kern.geom.eli.passphrase"); } } SYSINIT(geli_fetch_loader_passphrase, SI_SUB_KMEM + 1, SI_ORDER_ANY, fetch_loader_passphrase, NULL); static void zero_boot_passcache(void) { explicit_bzero(cached_passphrase, sizeof(cached_passphrase)); } static void zero_geli_intake_keys(void) { struct keybuf *keybuf; int i; if ((keybuf = get_keybuf()) != NULL) { /* Scan the key buffer, clear all GELI keys. 
*/ for (i = 0; i < keybuf->kb_nents; i++) { if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) { explicit_bzero(keybuf->kb_ents[i].ke_data, sizeof(keybuf->kb_ents[i].ke_data)); keybuf->kb_ents[i].ke_type = KEYBUF_TYPE_NONE; } } } } static void zero_intake_passcache(void *dummy) { zero_boot_passcache(); zero_geli_intake_keys(); } EVENTHANDLER_DEFINE(mountroot, zero_intake_passcache, NULL, 0); static eventhandler_tag g_eli_pre_sync = NULL; static int g_eli_read_metadata_offset(struct g_class *mp, struct g_provider *pp, off_t offset, struct g_eli_metadata *md); static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp); static void g_eli_init(struct g_class *mp); static void g_eli_fini(struct g_class *mp); static g_taste_t g_eli_taste; static g_dumpconf_t g_eli_dumpconf; struct g_class g_eli_class = { .name = G_ELI_CLASS_NAME, .version = G_VERSION, .ctlreq = g_eli_config, .taste = g_eli_taste, .destroy_geom = g_eli_destroy_geom, .init = g_eli_init, .fini = g_eli_fini }; /* * Code paths: * BIO_READ: * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ /* * EAGAIN from crypto(9) means, that we were probably balanced to another crypto * accelerator or something like this. * The function updates the SID and rerun the operation. */ int g_eli_crypto_rerun(struct cryptop *crp) { struct g_eli_softc *sc; struct g_eli_worker *wr; struct bio *bp; int error; bp = (struct bio *)crp->crp_opaque; sc = bp->bio_to->geom->softc; LIST_FOREACH(wr, &sc->sc_workers, w_next) { if (wr->w_number == bp->bio_pflags) break; } KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags)); G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %p -> %p).", bp->bio_cmd == BIO_READ ? "READ" : "WRITE", wr->w_sid, crp->crp_session); wr->w_sid = crp->crp_session; crp->crp_etype = 0; error = crypto_dispatch(crp); if (error == 0) return (0); G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error); crp->crp_etype = error; return (error); } static void g_eli_getattr_done(struct bio *bp) { if (bp->bio_error == 0 && !strcmp(bp->bio_attribute, "GEOM::physpath")) { strlcat(bp->bio_data, "/eli", bp->bio_length); } g_std_done(bp); } /* * The function is called afer reading encrypted data from the provider. * * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver */ void g_eli_read_done(struct bio *bp) { struct g_eli_softc *sc; struct bio *pbp; G_ELI_LOGREQ(2, bp, "Request done."); pbp = bp->bio_parent; if (pbp->bio_error == 0 && bp->bio_error != 0) pbp->bio_error = bp->bio_error; g_destroy_bio(bp); /* * Do we have all sectors already? */ pbp->bio_inbed++; if (pbp->bio_inbed < pbp->bio_children) return; sc = pbp->bio_to->geom->softc; if (pbp->bio_error != 0) { G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__, pbp->bio_error); pbp->bio_completed = 0; if (pbp->bio_driver2 != NULL) { free(pbp->bio_driver2, M_ELI); pbp->bio_driver2 = NULL; } g_io_deliver(pbp, pbp->bio_error); if (sc != NULL) atomic_subtract_int(&sc->sc_inflight, 1); return; } mtx_lock(&sc->sc_queue_mtx); bioq_insert_tail(&sc->sc_queue, pbp); mtx_unlock(&sc->sc_queue_mtx); wakeup(sc); } /* * The function is called after we encrypt and write data. 
* * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver */ void g_eli_write_done(struct bio *bp) { struct g_eli_softc *sc; struct bio *pbp; G_ELI_LOGREQ(2, bp, "Request done."); pbp = bp->bio_parent; if (pbp->bio_error == 0 && bp->bio_error != 0) pbp->bio_error = bp->bio_error; g_destroy_bio(bp); /* * Do we have all sectors already? */ pbp->bio_inbed++; if (pbp->bio_inbed < pbp->bio_children) return; free(pbp->bio_driver2, M_ELI); pbp->bio_driver2 = NULL; if (pbp->bio_error != 0) { G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__, pbp->bio_error); pbp->bio_completed = 0; } else pbp->bio_completed = pbp->bio_length; /* * Write is finished, send it up. */ sc = pbp->bio_to->geom->softc; g_io_deliver(pbp, pbp->bio_error); if (sc != NULL) atomic_subtract_int(&sc->sc_inflight, 1); } /* * This function should never be called, but GEOM made as it set ->orphan() * method for every geom. */ static void g_eli_orphan_spoil_assert(struct g_consumer *cp) { panic("Function %s() called for %s.", __func__, cp->geom->name); } static void g_eli_orphan(struct g_consumer *cp) { struct g_eli_softc *sc; g_topology_assert(); sc = cp->geom->softc; if (sc == NULL) return; g_eli_destroy(sc, TRUE); } static void g_eli_resize(struct g_consumer *cp) { struct g_eli_softc *sc; struct g_provider *epp, *pp; off_t oldsize; g_topology_assert(); sc = cp->geom->softc; if (sc == NULL) return; if ((sc->sc_flags & G_ELI_FLAG_AUTORESIZE) == 0) { G_ELI_DEBUG(0, "Autoresize is turned off, old size: %jd.", (intmax_t)sc->sc_provsize); return; } pp = cp->provider; if ((sc->sc_flags & G_ELI_FLAG_ONETIME) == 0) { struct g_eli_metadata md; u_char *sector; int error; sector = NULL; error = g_eli_read_metadata_offset(cp->geom->class, pp, sc->sc_provsize - pp->sectorsize, &md); if (error != 0) { G_ELI_DEBUG(0, "Cannot read metadata from %s (error=%d).", pp->name, error); goto iofail; } md.md_provsize = pp->mediasize; sector = malloc(pp->sectorsize, M_ELI, M_WAITOK | M_ZERO); eli_metadata_encode(&md, sector); error = g_write_data(cp, pp->mediasize - pp->sectorsize, sector, pp->sectorsize); if (error != 0) { G_ELI_DEBUG(0, "Cannot store metadata on %s (error=%d).", pp->name, error); goto iofail; } explicit_bzero(sector, pp->sectorsize); error = g_write_data(cp, sc->sc_provsize - pp->sectorsize, sector, pp->sectorsize); if (error != 0) { G_ELI_DEBUG(0, "Cannot clear old metadata from %s (error=%d).", pp->name, error); goto iofail; } iofail: explicit_bzero(&md, sizeof(md)); zfree(sector, M_ELI); } oldsize = sc->sc_mediasize; sc->sc_mediasize = eli_mediasize(sc, pp->mediasize, pp->sectorsize); g_eli_key_resize(sc); sc->sc_provsize = pp->mediasize; epp = LIST_FIRST(&sc->sc_geom->provider); g_resize_provider(epp, sc->sc_mediasize); G_ELI_DEBUG(0, "Device %s size changed from %jd to %jd.", epp->name, (intmax_t)oldsize, (intmax_t)sc->sc_mediasize); } /* * BIO_READ: * G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver * BIO_WRITE: * G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ static void g_eli_start(struct bio *bp) { struct g_eli_softc *sc; struct g_consumer *cp; struct bio *cbp; sc = bp->bio_to->geom->softc; KASSERT(sc != NULL, ("Provider's error should be set (error=%d)(device=%s).", bp->bio_to->error, bp->bio_to->name)); G_ELI_LOGREQ(2, bp, "Request received."); switch (bp->bio_cmd) { case BIO_READ: case BIO_WRITE: case BIO_GETATTR: case 
BIO_FLUSH: case BIO_ZONE: case BIO_SPEEDUP: break; case BIO_DELETE: /* * If the user hasn't set the NODELETE flag, we just pass * it down the stack and let the layers beneath us do (or * not) whatever they do with it. If they have, we * reject it. A possible extension would be an * additional flag to take it as a hint to shred the data * with [multiple?] overwrites. */ if (!(sc->sc_flags & G_ELI_FLAG_NODELETE)) break; default: g_io_deliver(bp, EOPNOTSUPP); return; } cbp = g_clone_bio(bp); if (cbp == NULL) { g_io_deliver(bp, ENOMEM); return; } bp->bio_driver1 = cbp; bp->bio_pflags = G_ELI_NEW_BIO; switch (bp->bio_cmd) { case BIO_READ: if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) { g_eli_crypto_read(sc, bp, 0); break; } /* FALLTHROUGH */ case BIO_WRITE: mtx_lock(&sc->sc_queue_mtx); bioq_insert_tail(&sc->sc_queue, bp); mtx_unlock(&sc->sc_queue_mtx); wakeup(sc); break; case BIO_GETATTR: case BIO_FLUSH: case BIO_DELETE: case BIO_SPEEDUP: case BIO_ZONE: if (bp->bio_cmd == BIO_GETATTR) cbp->bio_done = g_eli_getattr_done; else cbp->bio_done = g_std_done; cp = LIST_FIRST(&sc->sc_geom->consumer); cbp->bio_to = cp->provider; G_ELI_LOGREQ(2, cbp, "Sending request."); g_io_request(cbp, cp); break; } } static int g_eli_newsession(struct g_eli_worker *wr) { struct g_eli_softc *sc; struct crypto_session_params csp; uint32_t caps; int error, new_crypto; void *key; sc = wr->w_softc; memset(&csp, 0, sizeof(csp)); csp.csp_mode = CSP_MODE_CIPHER; csp.csp_cipher_alg = sc->sc_ealgo; csp.csp_ivlen = g_eli_ivlen(sc->sc_ealgo); csp.csp_cipher_klen = sc->sc_ekeylen / 8; if (sc->sc_ealgo == CRYPTO_AES_XTS) csp.csp_cipher_klen <<= 1; if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) { key = g_eli_key_hold(sc, 0, LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize); csp.csp_cipher_key = key; } else { key = NULL; csp.csp_cipher_key = sc->sc_ekey; } if (sc->sc_flags & G_ELI_FLAG_AUTH) { csp.csp_mode = CSP_MODE_ETA; csp.csp_auth_alg = sc->sc_aalgo; csp.csp_auth_klen = G_ELI_AUTH_SECKEYLEN; } switch (sc->sc_crypto) { case G_ELI_CRYPTO_SW_ACCEL: case G_ELI_CRYPTO_SW: error = crypto_newsession(&wr->w_sid, &csp, CRYPTOCAP_F_SOFTWARE); break; case G_ELI_CRYPTO_HW: error = crypto_newsession(&wr->w_sid, &csp, CRYPTOCAP_F_HARDWARE); break; case G_ELI_CRYPTO_UNKNOWN: error = crypto_newsession(&wr->w_sid, &csp, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE); if (error == 0) { caps = crypto_ses2caps(wr->w_sid); if (caps & CRYPTOCAP_F_HARDWARE) new_crypto = G_ELI_CRYPTO_HW; else if (caps & CRYPTOCAP_F_ACCEL_SOFTWARE) new_crypto = G_ELI_CRYPTO_SW_ACCEL; else new_crypto = G_ELI_CRYPTO_SW; mtx_lock(&sc->sc_queue_mtx); if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN) sc->sc_crypto = new_crypto; mtx_unlock(&sc->sc_queue_mtx); } break; default: panic("%s: invalid condition", __func__); } if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) { if (error) g_eli_key_drop(sc, key); else wr->w_first_key = key; } return (error); } static void g_eli_freesession(struct g_eli_worker *wr) { struct g_eli_softc *sc; crypto_freesession(wr->w_sid); if (wr->w_first_key != NULL) { sc = wr->w_softc; g_eli_key_drop(sc, wr->w_first_key); wr->w_first_key = NULL; } } static void g_eli_cancel(struct g_eli_softc *sc) { struct bio *bp; mtx_assert(&sc->sc_queue_mtx, MA_OWNED); while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) { KASSERT(bp->bio_pflags == G_ELI_NEW_BIO, ("Not new bio when canceling (bp=%p).", bp)); g_io_deliver(bp, ENXIO); } } static struct bio * g_eli_takefirst(struct g_eli_softc *sc) { struct bio *bp; mtx_assert(&sc->sc_queue_mtx, MA_OWNED); if (!(sc->sc_flags 
& G_ELI_FLAG_SUSPEND)) return (bioq_takefirst(&sc->sc_queue)); /* * Device suspended, so we skip new I/O requests. */ TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { if (bp->bio_pflags != G_ELI_NEW_BIO) break; } if (bp != NULL) bioq_remove(&sc->sc_queue, bp); return (bp); } /* * This is the main function for kernel worker thread when we don't have * hardware acceleration and we have to do cryptography in software. * Dedicated thread is needed, so we don't slow down g_up/g_down GEOM * threads with crypto work. */ static void g_eli_worker(void *arg) { struct g_eli_softc *sc; struct g_eli_worker *wr; struct bio *bp; int error; wr = arg; sc = wr->w_softc; #ifdef EARLY_AP_STARTUP MPASS(!sc->sc_cpubind || smp_started); #elif defined(SMP) /* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */ if (sc->sc_cpubind) { while (!smp_started) tsleep(wr, 0, "geli:smp", hz / 4); } #endif thread_lock(curthread); sched_prio(curthread, PUSER); if (sc->sc_cpubind) sched_bind(curthread, wr->w_number % mp_ncpus); thread_unlock(curthread); G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm); for (;;) { mtx_lock(&sc->sc_queue_mtx); again: bp = g_eli_takefirst(sc); if (bp == NULL) { if (sc->sc_flags & G_ELI_FLAG_DESTROY) { g_eli_cancel(sc); LIST_REMOVE(wr, w_next); g_eli_freesession(wr); free(wr, M_ELI); G_ELI_DEBUG(1, "Thread %s exiting.", curthread->td_proc->p_comm); wakeup(&sc->sc_workers); mtx_unlock(&sc->sc_queue_mtx); kproc_exit(0); } while (sc->sc_flags & G_ELI_FLAG_SUSPEND) { if (sc->sc_inflight > 0) { G_ELI_DEBUG(0, "inflight=%d", sc->sc_inflight); /* * We still have inflight BIOs, so * sleep and retry. */ msleep(sc, &sc->sc_queue_mtx, PRIBIO, "geli:inf", hz / 5); goto again; } /* * Suspend requested, mark the worker as * suspended and go to sleep. */ if (wr->w_active) { g_eli_freesession(wr); wr->w_active = FALSE; } wakeup(&sc->sc_workers); msleep(sc, &sc->sc_queue_mtx, PRIBIO, "geli:suspend", 0); if (!wr->w_active && !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) { error = g_eli_newsession(wr); KASSERT(error == 0, ("g_eli_newsession() failed on resume (error=%d)", error)); wr->w_active = TRUE; } goto again; } msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0); continue; } if (bp->bio_pflags == G_ELI_NEW_BIO) atomic_add_int(&sc->sc_inflight, 1); mtx_unlock(&sc->sc_queue_mtx); if (bp->bio_pflags == G_ELI_NEW_BIO) { bp->bio_pflags = 0; if (sc->sc_flags & G_ELI_FLAG_AUTH) { if (bp->bio_cmd == BIO_READ) g_eli_auth_read(sc, bp); else g_eli_auth_run(wr, bp); } else { if (bp->bio_cmd == BIO_READ) g_eli_crypto_read(sc, bp, 1); else g_eli_crypto_run(wr, bp); } } else { if (sc->sc_flags & G_ELI_FLAG_AUTH) g_eli_auth_run(wr, bp); else g_eli_crypto_run(wr, bp); } } } static int g_eli_read_metadata_offset(struct g_class *mp, struct g_provider *pp, off_t offset, struct g_eli_metadata *md) { struct g_geom *gp; struct g_consumer *cp; u_char *buf = NULL; int error; g_topology_assert(); gp = g_new_geomf(mp, "eli:taste"); gp->start = g_eli_start; gp->access = g_std_access; /* * g_eli_read_metadata() is always called from the event thread. * Our geom is created and destroyed in the same event, so there * could be no orphan nor spoil event in the meantime. 
*/ gp->orphan = g_eli_orphan_spoil_assert; gp->spoiled = g_eli_orphan_spoil_assert; cp = g_new_consumer(gp); error = g_attach(cp, pp); if (error != 0) goto end; error = g_access(cp, 1, 0, 0); if (error != 0) goto end; g_topology_unlock(); buf = g_read_data(cp, offset, pp->sectorsize, &error); g_topology_lock(); if (buf == NULL) goto end; error = eli_metadata_decode(buf, md); if (error != 0) goto end; /* Metadata was read and decoded successfully. */ end: if (buf != NULL) g_free(buf); if (cp->provider != NULL) { if (cp->acr == 1) g_access(cp, -1, 0, 0); g_detach(cp); } g_destroy_consumer(cp); g_destroy_geom(gp); return (error); } int g_eli_read_metadata(struct g_class *mp, struct g_provider *pp, struct g_eli_metadata *md) { return (g_eli_read_metadata_offset(mp, pp, pp->mediasize - pp->sectorsize, md)); } /* * The function is called when we had last close on provider and user requested * to close it when this situation occur. */ static void g_eli_last_close(void *arg, int flags __unused) { struct g_geom *gp; char gpname[64]; int error; g_topology_assert(); gp = arg; strlcpy(gpname, gp->name, sizeof(gpname)); error = g_eli_destroy(gp->softc, TRUE); KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).", gpname, error)); G_ELI_DEBUG(0, "Detached %s on last close.", gpname); } int g_eli_access(struct g_provider *pp, int dr, int dw, int de) { struct g_eli_softc *sc; struct g_geom *gp; gp = pp->geom; sc = gp->softc; if (dw > 0) { if (sc->sc_flags & G_ELI_FLAG_RO) { /* Deny write attempts. */ return (EROFS); } /* Someone is opening us for write, we need to remember that. */ sc->sc_flags |= G_ELI_FLAG_WOPEN; return (0); } /* Is this the last close? */ if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0) return (0); /* * Automatically detach on last close if requested. */ if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) || (sc->sc_flags & G_ELI_FLAG_WOPEN)) { g_post_event(g_eli_last_close, gp, M_WAITOK, NULL); } return (0); } static int g_eli_cpu_is_disabled(int cpu) { #ifdef SMP return (CPU_ISSET(cpu, &hlt_cpus_mask)); #else return (0); #endif } struct g_geom * g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp, const struct g_eli_metadata *md, const u_char *mkey, int nkey) { struct g_eli_softc *sc; struct g_eli_worker *wr; struct g_geom *gp; struct g_provider *pp; struct g_consumer *cp; struct g_geom_alias *gap; u_int i, threads; int dcw, error; G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX); KASSERT(eli_metadata_crypto_supported(md), ("%s: unsupported crypto for %s", __func__, bpp->name)); gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX); sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO); gp->start = g_eli_start; /* * Spoiling can happen even though we have the provider open * exclusively, e.g. through media change events. */ gp->spoiled = g_eli_orphan; gp->orphan = g_eli_orphan; gp->resize = g_eli_resize; gp->dumpconf = g_eli_dumpconf; /* * If detach-on-last-close feature is not enabled and we don't operate * on read-only provider, we can simply use g_std_access(). 
*/ if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO)) gp->access = g_eli_access; else gp->access = g_std_access; eli_metadata_softc(sc, md, bpp->sectorsize, bpp->mediasize); sc->sc_nkey = nkey; gp->softc = sc; sc->sc_geom = gp; bioq_init(&sc->sc_queue); mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF); mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF); pp = NULL; cp = g_new_consumer(gp); error = g_attach(cp, bpp); if (error != 0) { if (req != NULL) { gctl_error(req, "Cannot attach to %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).", bpp->name, error); } goto failed; } /* * Keep provider open all the time, so we can run critical tasks, * like Master Keys deletion, without wondering if we can open * provider or not. * We don't open provider for writing only when user requested read-only * access. */ dcw = (sc->sc_flags & G_ELI_FLAG_RO) ? 0 : 1; error = g_access(cp, 1, dcw, 1); if (error != 0) { if (req != NULL) { gctl_error(req, "Cannot access %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot access %s (error=%d).", bpp->name, error); } goto failed; } /* * Remember the keys in our softc structure. */ g_eli_mkey_propagate(sc, mkey); LIST_INIT(&sc->sc_workers); threads = g_eli_threads; if (threads == 0) threads = mp_ncpus; sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus); for (i = 0; i < threads; i++) { if (g_eli_cpu_is_disabled(i)) { G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.", bpp->name, i); continue; } wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO); wr->w_softc = sc; wr->w_number = i; wr->w_active = TRUE; error = g_eli_newsession(wr); if (error != 0) { free(wr, M_ELI); if (req != NULL) { gctl_error(req, "Cannot set up crypto session " "for %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot set up crypto session " "for %s (error=%d).", bpp->name, error); } goto failed; } error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0, "g_eli[%u] %s", i, bpp->name); if (error != 0) { g_eli_freesession(wr); free(wr, M_ELI); if (req != NULL) { gctl_error(req, "Cannot create kernel thread " "for %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot create kernel thread " "for %s (error=%d).", bpp->name, error); } goto failed; } LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next); } /* * Create decrypted provider. */ pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX); pp->mediasize = sc->sc_mediasize; pp->sectorsize = sc->sc_sectorsize; LIST_FOREACH(gap, &bpp->aliases, ga_next) g_provider_add_alias(pp, "%s%s", gap->ga_alias, G_ELI_SUFFIX); g_error_provider(pp, 0); G_ELI_DEBUG(0, "Device %s created.", pp->name); G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo), sc->sc_ekeylen); if (sc->sc_flags & G_ELI_FLAG_AUTH) G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo)); G_ELI_DEBUG(0, " Crypto: %s", sc->sc_crypto == G_ELI_CRYPTO_SW_ACCEL ? "accelerated software" : sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware"); return (gp); failed: mtx_lock(&sc->sc_queue_mtx); sc->sc_flags |= G_ELI_FLAG_DESTROY; wakeup(sc); /* * Wait for kernel threads self destruction. 
*/ while (!LIST_EMPTY(&sc->sc_workers)) { msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO, "geli:destroy", 0); } mtx_destroy(&sc->sc_queue_mtx); if (cp->provider != NULL) { if (cp->acr == 1) g_access(cp, -1, -dcw, -1); g_detach(cp); } g_destroy_consumer(cp); g_destroy_geom(gp); g_eli_key_destroy(sc); - bzero(sc, sizeof(*sc)); - free(sc, M_ELI); + zfree(sc, M_ELI); return (NULL); } int g_eli_destroy(struct g_eli_softc *sc, boolean_t force) { struct g_geom *gp; struct g_provider *pp; g_topology_assert(); if (sc == NULL) return (ENXIO); gp = sc->sc_geom; pp = LIST_FIRST(&gp->provider); if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { if (force) { G_ELI_DEBUG(1, "Device %s is still open, so it " "cannot be definitely removed.", pp->name); sc->sc_flags |= G_ELI_FLAG_RW_DETACH; gp->access = g_eli_access; g_wither_provider(pp, ENXIO); return (EBUSY); } else { G_ELI_DEBUG(1, "Device %s is still open (r%dw%de%d).", pp->name, pp->acr, pp->acw, pp->ace); return (EBUSY); } } mtx_lock(&sc->sc_queue_mtx); sc->sc_flags |= G_ELI_FLAG_DESTROY; wakeup(sc); while (!LIST_EMPTY(&sc->sc_workers)) { msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO, "geli:destroy", 0); } mtx_destroy(&sc->sc_queue_mtx); gp->softc = NULL; g_eli_key_destroy(sc); - bzero(sc, sizeof(*sc)); - free(sc, M_ELI); + zfree(sc, M_ELI); G_ELI_DEBUG(0, "Device %s destroyed.", gp->name); g_wither_geom_close(gp, ENXIO); return (0); } static int g_eli_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused, struct g_geom *gp) { struct g_eli_softc *sc; sc = gp->softc; return (g_eli_destroy(sc, FALSE)); } static int g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider) { u_char *keyfile, *data; char *file, name[64]; size_t size; int i; for (i = 0; ; i++) { snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i); keyfile = preload_search_by_type(name); if (keyfile == NULL && i == 0) { /* * If there is only one keyfile, allow simpler name. */ snprintf(name, sizeof(name), "%s:geli_keyfile", provider); keyfile = preload_search_by_type(name); } if (keyfile == NULL) return (i); /* Return number of loaded keyfiles. */ data = preload_fetch_addr(keyfile); if (data == NULL) { G_ELI_DEBUG(0, "Cannot find key file data for %s.", name); return (0); } size = preload_fetch_size(keyfile); if (size == 0) { G_ELI_DEBUG(0, "Cannot find key file size for %s.", name); return (0); } file = preload_search_info(keyfile, MODINFO_NAME); if (file == NULL) { G_ELI_DEBUG(0, "Cannot find key file name for %s.", name); return (0); } G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file, provider, name); g_eli_crypto_hmac_update(ctx, data, size); } } static void g_eli_keyfiles_clear(const char *provider) { u_char *keyfile, *data; char name[64]; size_t size; int i; for (i = 0; ; i++) { snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i); keyfile = preload_search_by_type(name); if (keyfile == NULL) return; data = preload_fetch_addr(keyfile); size = preload_fetch_size(keyfile); if (data != NULL && size != 0) bzero(data, size); } } /* * Tasting is only made on boot. * We detect providers which should be attached before root is mounted. 
*/ static struct g_geom * g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) { struct g_eli_metadata md; struct g_geom *gp; struct hmac_ctx ctx; char passphrase[256]; u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN]; u_int i, nkey, nkeyfiles, tries, showpass; int error; struct keybuf *keybuf; g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); g_topology_assert(); if (root_mounted() || g_eli_tries == 0) return (NULL); G_ELI_DEBUG(3, "Tasting %s.", pp->name); error = g_eli_read_metadata(mp, pp, &md); if (error != 0) return (NULL); gp = NULL; if (strcmp(md.md_magic, G_ELI_MAGIC) != 0) return (NULL); if (md.md_version > G_ELI_VERSION) { printf("geom_eli.ko module is too old to handle %s.\n", pp->name); return (NULL); } if (md.md_provsize != pp->mediasize) return (NULL); /* Should we attach it on boot? */ if (!(md.md_flags & G_ELI_FLAG_BOOT) && !(md.md_flags & G_ELI_FLAG_GELIBOOT)) return (NULL); if (md.md_keys == 0x00) { G_ELI_DEBUG(0, "No valid keys on %s.", pp->name); return (NULL); } if (!eli_metadata_crypto_supported(&md)) { G_ELI_DEBUG(0, "%s uses invalid or unsupported algorithms\n", pp->name); return (NULL); } if (md.md_iterations == -1) { /* If there is no passphrase, we try only once. */ tries = 1; } else { /* Ask for the passphrase no more than g_eli_tries times. */ tries = g_eli_tries; } if ((keybuf = get_keybuf()) != NULL) { /* Scan the key buffer, try all GELI keys. */ for (i = 0; i < keybuf->kb_nents; i++) { if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) { memcpy(key, keybuf->kb_ents[i].ke_data, sizeof(key)); if (g_eli_mkey_decrypt_any(&md, key, mkey, &nkey) == 0 ) { explicit_bzero(key, sizeof(key)); goto have_key; } } } } for (i = 0; i <= tries; i++) { g_eli_crypto_hmac_init(&ctx, NULL, 0); /* * Load all key files. */ nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name); if (nkeyfiles == 0 && md.md_iterations == -1) { /* * No key files and no passphrase, something is * definitely wrong here. * geli(8) doesn't allow for such situation, so assume * that there was really no passphrase and in that case * key files are no properly defined in loader.conf. */ G_ELI_DEBUG(0, "Found no key files in loader.conf for %s.", pp->name); return (NULL); } /* Ask for the passphrase if defined. */ if (md.md_iterations >= 0) { /* Try first with cached passphrase. */ if (i == 0) { if (!g_eli_boot_passcache) continue; memcpy(passphrase, cached_passphrase, sizeof(passphrase)); } else { printf("Enter passphrase for %s: ", pp->name); showpass = g_eli_visible_passphrase; if ((md.md_flags & G_ELI_FLAG_GELIDISPLAYPASS) != 0) showpass = GETS_ECHOPASS; cngets(passphrase, sizeof(passphrase), showpass); memcpy(cached_passphrase, passphrase, sizeof(passphrase)); } } /* * Prepare Derived-Key from the user passphrase. */ if (md.md_iterations == 0) { g_eli_crypto_hmac_update(&ctx, md.md_salt, sizeof(md.md_salt)); g_eli_crypto_hmac_update(&ctx, passphrase, strlen(passphrase)); explicit_bzero(passphrase, sizeof(passphrase)); } else if (md.md_iterations > 0) { u_char dkey[G_ELI_USERKEYLEN]; pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt, sizeof(md.md_salt), passphrase, md.md_iterations); bzero(passphrase, sizeof(passphrase)); g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey)); explicit_bzero(dkey, sizeof(dkey)); } g_eli_crypto_hmac_final(&ctx, key, 0); /* * Decrypt Master-Key. */ error = g_eli_mkey_decrypt_any(&md, key, mkey, &nkey); bzero(key, sizeof(key)); if (error == -1) { if (i == tries) { G_ELI_DEBUG(0, "Wrong key for %s. 
No tries left.", pp->name); g_eli_keyfiles_clear(pp->name); return (NULL); } if (i > 0) { G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.", pp->name, tries - i); } /* Try again. */ continue; } else if (error > 0) { G_ELI_DEBUG(0, "Cannot decrypt Master Key for %s (error=%d).", pp->name, error); g_eli_keyfiles_clear(pp->name); return (NULL); } g_eli_keyfiles_clear(pp->name); G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name); break; } have_key: /* * We have correct key, let's attach provider. */ gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey); bzero(mkey, sizeof(mkey)); bzero(&md, sizeof(md)); if (gp == NULL) { G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name, G_ELI_SUFFIX); return (NULL); } return (gp); } static void g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp) { struct g_eli_softc *sc; g_topology_assert(); sc = gp->softc; if (sc == NULL) return; if (pp != NULL || cp != NULL) return; /* Nothing here. */ sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t)sc->sc_ekeys_total); sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t)sc->sc_ekeys_allocated); sbuf_printf(sb, "%s", indent); if (sc->sc_flags == 0) sbuf_cat(sb, "NONE"); else { int first = 1; #define ADD_FLAG(flag, name) do { \ if (sc->sc_flags & (flag)) { \ if (!first) \ sbuf_cat(sb, ", "); \ else \ first = 0; \ sbuf_cat(sb, name); \ } \ } while (0) ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND"); ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY"); ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER"); ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME"); ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT"); ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH"); ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH"); ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH"); ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN"); ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY"); ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY"); ADD_FLAG(G_ELI_FLAG_NODELETE, "NODELETE"); ADD_FLAG(G_ELI_FLAG_GELIBOOT, "GELIBOOT"); ADD_FLAG(G_ELI_FLAG_GELIDISPLAYPASS, "GELIDISPLAYPASS"); ADD_FLAG(G_ELI_FLAG_AUTORESIZE, "AUTORESIZE"); #undef ADD_FLAG } sbuf_cat(sb, "\n"); if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) { sbuf_printf(sb, "%s%u\n", indent, sc->sc_nkey); } sbuf_printf(sb, "%s%u\n", indent, sc->sc_version); sbuf_printf(sb, "%s", indent); switch (sc->sc_crypto) { case G_ELI_CRYPTO_HW: sbuf_cat(sb, "hardware"); break; case G_ELI_CRYPTO_SW: sbuf_cat(sb, "software"); break; case G_ELI_CRYPTO_SW_ACCEL: sbuf_cat(sb, "accelerated software"); break; default: sbuf_cat(sb, "UNKNOWN"); break; } sbuf_cat(sb, "\n"); if (sc->sc_flags & G_ELI_FLAG_AUTH) { sbuf_printf(sb, "%s%s\n", indent, g_eli_algo2str(sc->sc_aalgo)); } sbuf_printf(sb, "%s%u\n", indent, sc->sc_ekeylen); sbuf_printf(sb, "%s%s\n", indent, g_eli_algo2str(sc->sc_ealgo)); sbuf_printf(sb, "%s%s\n", indent, (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE"); } static void g_eli_shutdown_pre_sync(void *arg, int howto) { struct g_class *mp; struct g_geom *gp, *gp2; struct g_provider *pp; struct g_eli_softc *sc; int error; mp = arg; g_topology_lock(); LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { sc = gp->softc; if (sc == NULL) continue; pp = LIST_FIRST(&gp->provider); KASSERT(pp != NULL, ("No provider? 
gp=%p (%s)", gp, gp->name)); if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0 || SCHEDULER_STOPPED()) { sc->sc_flags |= G_ELI_FLAG_RW_DETACH; gp->access = g_eli_access; } else { error = g_eli_destroy(sc, TRUE); } } g_topology_unlock(); } static void g_eli_init(struct g_class *mp) { g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync, g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST); if (g_eli_pre_sync == NULL) G_ELI_DEBUG(0, "Warning! Cannot register shutdown event."); } static void g_eli_fini(struct g_class *mp) { if (g_eli_pre_sync != NULL) EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync); } DECLARE_GEOM_CLASS(g_eli_class, g_eli); MODULE_DEPEND(g_eli, crypto, 1, 1, 1); MODULE_VERSION(geom_eli, 0);