diff --git a/share/man/man9/crypto_driver.9 b/share/man/man9/crypto_driver.9 index 5a205ee4a094..930cc0d8bde4 100644 --- a/share/man/man9/crypto_driver.9 +++ b/share/man/man9/crypto_driver.9 @@ -1,341 +1,343 @@ .\" Copyright (c) 2020, Chelsio Inc .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions are met: .\" .\" 1. Redistributions of source code must retain the above copyright notice, .\" this list of conditions and the following disclaimer. .\" .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" 3. Neither the name of the Chelsio Inc nor the names of its .\" contributors may be used to endorse or promote products derived from .\" this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" .\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE .\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" .\" * Other names and brands may be claimed as the property of others. .\" .\" $FreeBSD$ .\" -.Dd May 25, 2020 +.Dd June 9, 2020 .Dt CRYPTO_DRIVER 9 .Os .Sh NAME .Nm crypto_driver .Nd interface for symmetric cryptographic drivers .Sh SYNOPSIS .In opencrypto/cryptodev.h .Ft void .Fn crypto_copyback "struct cryptop *crp" "int off" "int size" "const void *src" .Ft void .Fn crypto_copydata "struct cryptop *crp" "int off" "int size" "void *dst" .Ft void .Fn crypto_done "struct cryptop *crp" .Ft int32_t .Fn crypto_get_driverid "device_t dev" "size_t session_size" "int flags" .Ft void * .Fn crypto_get_driver_session "crypto_session_t crypto_session" .Ft void .Fn crypto_read_iv "struct cryptop *crp" "void *iv" .Ft int .Fn crypto_unblock "uint32_t driverid" "int what" .Ft int .Fn crypto_unregister_all "uint32_t driverid" .Ft int .Fn CRYPTODEV_FREESESSION "device_t dev" "crypto_session_t crypto_session" .Ft int .Fo CRYPTODEV_NEWSESSION .Fa "device_t dev" .Fa "crypto_session_t crypto_session" .Fa "const struct crypto_session_params *csp" .Fc .Ft int .Fo CRYPTODEV_PROBESESSION .Fa "device_t dev" .Fa "const struct crypto_session_params *csp" .Fc .Ft int .Fn CRYPTODEV_PROCESS "device_t dev" "struct cryptop *crp" "int flags" .Ft void .Fo hmac_init_ipad .Fa "struct auth_hash *axf" .Fa "const char *key" .Fa "int klen" .Fa "void *auth_ctx" .Fc .Ft void .Fo hmac_init_opad .Fa "struct auth_hash *axf" .Fa "const char *key" .Fa "int klen" .Fa "void *auth_ctx" .Fc .Sh DESCRIPTION Symmetric cryptographic drivers process cryptographic requests submitted to sessions associated with the driver. .Pp Cryptographic drivers call .Fn crypto_get_driverid to register with the cryptographic framework. .Fa dev is the device used to service requests. 
The .Fn CRYPTODEV methods are defined in the method table for the device driver attached to .Fa dev . .Fa session_size specifies the size of a driver-specific per-session structure allocated by the cryptographic framework. .Fa flags is a bitmask of properties about the driver. Exactly one of .Dv CRYPTOCAP_F_SOFTWARE or .Dv CRYPTOCAP_F_HARDWARE must be specified. .Dv CRYPTOCAP_F_SOFTWARE should be used for drivers which process requests using host CPUs. .Dv CRYPTOCAP_F_HARDWARE should be used for drivers which process requests on separate co-processors. .Dv CRYPTOCAP_F_SYNC should be set for drivers which process requests synchronously in .Fn CRYPTODEV_PROCESS . +.Dv CRYPTOCAP_F_ACCEL_SOFTWARE +should be set for software drivers which use accelerated CPU instructions. .Fn crypto_get_driverid returns an opaque driver id. .Pp .Fn crypto_unregister_all unregisters a driver from the cryptographic framework. If there are any pending operations or open sessions, this function will sleep. .Fa driverid is the value returned by an earlier call to .Fn crypto_get_driverid . .Pp When a new session is created by .Fn crypto_newsession , .Fn CRYPTODEV_PROBESESSION is invoked by the cryptographic framework on each active driver to determine the best driver to use for the session. This method should inspect the session parameters in .Fa csp . If a driver does not support requests described by .Fa csp , this method should return an error value. If the driver does support requests described by .Fa csp , it should return a negative value. The framework prefers drivers with the largest negative value, similar to .Xr DEVICE_PROBE 9 . The following values are defined for non-error return values from this method: .Bl -tag -width "CRYPTODEV_PROBE_ACCEL_SOFTWARE" .It Dv CRYPTODEV_PROBE_HARDWARE The driver processes requests via a co-processor. .It Dv CRYPTODEV_PROBE_ACCEL_SOFTWARE The driver processes requests on the host CPU using optimized instructions such as AES-NI. .It Dv CRYPTODEV_PROBE_SOFTWARE The driver processes requests on the host CPU. .El .Pp This method should not sleep. .Pp Once the framework has chosen a driver for a session, the framework invokes the .Fn CRYPTODEV_NEWSESSION method to initialize driver-specific session state. Prior to calling this method, the framework allocates a per-session driver-specific data structure. This structure is initialized with zeroes, and its size is set by the .Fa session_size passed to .Fn crypto_get_driverid . This method can retrieve a pointer to this data structure by passing .Fa crypto_session to .Fn crypto_get_driver_session . Session parameters are described in .Fa csp . .Pp This method should not sleep. .Pp .Fn CRYPTODEV_FREESESSION is invoked to release any driver-specific state when a session is destroyed. The per-session driver-specific data structure is explicitly zeroed and freed by the framework after this method returns. If a driver requires no additional tear-down steps, it can leave this method undefined. .Pp This method should not sleep. .Pp .Fn CRYPTODEV_PROCESS is invoked for each request submitted to an active session. This method can either complete a request synchronously or schedule it to be completed asynchronously, but it must not sleep. .Pp If this method is not able to complete a request due to insufficient resources such as a full command queue, it can defer the request by returning .Dv ERESTART . The request will be queued by the framework and retried once the driver releases pending requests via .Fn crypto_unblock . 
Any requests submitted to sessions belonging to the driver will also be queued until .Fn crypto_unblock is called. .Pp If a driver encounters errors while processing a request, it should report them via the .Fa crp_etype field of .Fa crp rather than returning an error directly. .Pp .Fa flags may be set to .Dv CRYPTO_HINT_MORE if there are additional requests queued for this driver. The driver can use this as a hint to batch completion interrupts. Note that these additional requests may be from different sessions. .Pp .Fn crypto_get_driver_session returns a pointer to the driver-specific per-session data structure for the session .Fa crypto_session . This function can be used in the .Fn CRYPTODEV_NEWSESSION , .Fn CRYPTODEV_PROCESS , and .Fn CRYPTODEV_FREESESSION callbacks. .Pp .Fn crypto_copydata copies .Fa size bytes out of the input buffer for .Fa crp into a local buffer pointed to by .Fa dst . The bytes are read starting at an offset of .Fa off bytes in the request's input buffer. .Pp .Fn crypto_copyback copies .Fa size bytes from the local buffer pointed to by .Fa src into the output buffer for .Fa crp . The bytes are written starting at an offset of .Fa off bytes in the request's output buffer. .Pp .Fn crypto_read_iv copies the IV or nonce for .Fa crp into the local buffer pointed to by .Fa iv . .Pp A driver calls .Fn crypto_done to mark the request .Fa crp as completed. Any errors should be set in .Fa crp_etype prior to calling this function. .Pp If a driver defers a request by returning .Dv ERESTART from .Fn CRYPTODEV_PROCESS , the framework will queue all requests for the driver until the driver calls .Fn crypto_unblock to indicate that the temporary resource shortage has been relieved. For example, if a driver returns .Dv ERESTART due to a full command ring, it would invoke .Fn crypto_unblock from a command completion interrupt that makes a command ring entry available. .Fa driverid is the value returned by .Fn crypto_get_driverid . .Fa what indicates which types of requests the driver is able to handle again: .Bl -tag -width "CRYPTO_ASYMQ" .It Dv CRYPTO_SYMQ indicates that the driver is able to handle symmetric requests passed to .Fn CRYPTODEV_PROCESS . .It Dv CRYPTO_ASYMQ indicates that the driver is able to handle asymmetric requests passed to .Fn CRYPTODEV_KPROCESS . .El .Pp .Fn hmac_init_ipad prepares an authentication context to generate the inner hash of an HMAC. .Fa axf is a software implementation of an authentication algorithm such as the value returned by .Fn crypto_auth_hash . .Fa key is a pointer to an HMAC key of .Fa klen bytes. .Fa auth_ctx points to a valid authentication context for the desired algorithm. The function initializes the context with the supplied key. .Pp .Fn hmac_init_opad is similar to .Fn hmac_init_ipad except that it prepares an authentication context to generate the outer hash of an HMAC. .Sh RETURN VALUES .Fn crypto_apply returns the return value from the caller-supplied callback function. .Pp .Fn crypto_contiguous_subsegment returns a pointer to a contiguous segment or .Dv NULL . .Pp .Fn crypto_get_driverid returns a driver identifier on success or -1 on error. .Pp .Fn crypto_unblock , .Fn crypto_unregister_all , .Fn CRYPTODEV_FREESESSION , .Fn CRYPTODEV_NEWSESSION , and .Fn CRYPTODEV_PROCESS return zero on success or an error on failure. .Pp .Fn CRYPTODEV_PROBESESSION returns a negative value on success or an error on failure.
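The following sketch illustrates the registration and probe flow described above. It is a minimal, hypothetical example: the exdrv_* names, the softc layout, and the session structure are placeholders, not an existing driver.

#include <opencrypto/cryptodev.h>

struct exdrv_session {
	int	rounds;		/* per-session state; zeroed by the framework */
};

struct exdrv_softc {
	int32_t	cid;
};

static int
exdrv_attach(device_t dev)
{
	struct exdrv_softc *sc = device_get_softc(dev);

	/* Register as an accelerated software driver. */
	sc->cid = crypto_get_driverid(dev, sizeof(struct exdrv_session),
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC |
	    CRYPTOCAP_F_ACCEL_SOFTWARE);
	if (sc->cid < 0)
		return (ENOMEM);
	return (0);
}

static int
exdrv_probesession(device_t dev, const struct crypto_session_params *csp)
{
	/* This sketch only claims plain AES-CBC cipher sessions. */
	if (csp->csp_mode != CSP_MODE_CIPHER ||
	    csp->csp_cipher_alg != CRYPTO_AES_CBC)
		return (EINVAL);
	return (CRYPTODEV_PROBE_ACCEL_SOFTWARE);
}

Returning CRYPTODEV_PROBE_ACCEL_SOFTWARE lets the framework prefer this driver over a plain software implementation while still ranking it below co-processor drivers.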
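A second sketch, using the same hypothetical exdrv_* naming and helpers, shows the deferral protocol described above: the process method returns ERESTART when its command ring is full, and the completion interrupt calls crypto_unblock once an entry frees up.

static int
exdrv_process(device_t dev, struct cryptop *crp, int hint)
{
	struct exdrv_softc *sc = device_get_softc(dev);

	if (exdrv_ring_full(sc))	/* hypothetical helper */
		return (ERESTART);	/* framework queues and retries later */

	exdrv_submit(sc, crp);		/* hypothetical helper; completes asynchronously */
	return (0);
}

static void
exdrv_intr(void *arg)
{
	struct exdrv_softc *sc = arg;

	/*
	 * Completed requests are finished here by setting crp_etype and
	 * calling crypto_done().  A ring entry is now free, so ask the
	 * framework to resubmit any queued symmetric requests.
	 */
	crypto_unblock(sc->cid, CRYPTO_SYMQ);
}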
.Sh SEE ALSO .Xr crypto 7 , .Xr crypto 9 , .Xr crypto_buffer 9 , .Xr crypto_request 9 , .Xr crypto_session 9 diff --git a/sys/crypto/aesni/aesni.c b/sys/crypto/aesni/aesni.c index 2ef0f7f39de5..38be9b0acb68 100644 --- a/sys/crypto/aesni/aesni.c +++ b/sys/crypto/aesni/aesni.c @@ -1,905 +1,906 @@ /*- * Copyright (c) 2005-2008 Pawel Jakub Dawidek * Copyright (c) 2010 Konstantin Belousov * Copyright (c) 2014 The FreeBSD Foundation * Copyright (c) 2017 Conrad Meyer * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) #include #elif defined(__amd64__) #include #endif static struct mtx_padalign *ctx_mtx; static struct fpu_kern_ctx **ctx_fpu; struct aesni_softc { int32_t cid; bool has_aes; bool has_sha; }; #define ACQUIRE_CTX(i, ctx) \ do { \ (i) = PCPU_GET(cpuid); \ mtx_lock(&ctx_mtx[(i)]); \ (ctx) = ctx_fpu[(i)]; \ } while (0) #define RELEASE_CTX(i, ctx) \ do { \ mtx_unlock(&ctx_mtx[(i)]); \ (i) = -1; \ (ctx) = NULL; \ } while (0) static int aesni_cipher_setup(struct aesni_session *ses, const struct crypto_session_params *csp); static int aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp); static int aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp, const struct crypto_session_params *csp); static int aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp, const struct crypto_session_params *csp); MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data"); static void aesni_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "aesni", -1) == NULL && BUS_ADD_CHILD(parent, 10, "aesni", -1) == 0) panic("aesni: could not attach"); } static void detect_cpu_features(bool *has_aes, bool *has_sha) { *has_aes = ((cpu_feature2 & CPUID2_AESNI) != 0 && (cpu_feature2 & CPUID2_SSE41) != 0); *has_sha = ((cpu_stdext_feature & CPUID_STDEXT_SHA) != 0 && (cpu_feature2 & CPUID2_SSSE3) != 0); } static int aesni_probe(device_t dev) { bool has_aes, has_sha; detect_cpu_features(&has_aes, &has_sha); if (!has_aes && !has_sha) { device_printf(dev, "No AES or SHA support.\n"); return (EINVAL); } else if (has_aes && has_sha) device_set_desc(dev, "AES-CBC,AES-CCM,AES-GCM,AES-ICM,AES-XTS,SHA1,SHA256"); else if (has_aes) device_set_desc(dev, "AES-CBC,AES-CCM,AES-GCM,AES-ICM,AES-XTS"); else device_set_desc(dev, "SHA1,SHA256"); return (0); } static void aesni_cleanctx(void) { int i; /* XXX - no way to return driverid */ CPU_FOREACH(i) { if (ctx_fpu[i] != NULL) { mtx_destroy(&ctx_mtx[i]); fpu_kern_free_ctx(ctx_fpu[i]); } ctx_fpu[i] = NULL; } free(ctx_mtx, M_AESNI); ctx_mtx = NULL; free(ctx_fpu, M_AESNI); ctx_fpu = NULL; } static int aesni_attach(device_t dev) { struct aesni_softc *sc; int i; sc = device_get_softc(dev); sc->cid = crypto_get_driverid(dev, sizeof(struct aesni_session), - CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); + CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC | + CRYPTOCAP_F_ACCEL_SOFTWARE); if (sc->cid < 0) { device_printf(dev, "Could not get crypto driver id.\n"); return (ENOMEM); } ctx_mtx = malloc(sizeof *ctx_mtx * (mp_maxid + 1), M_AESNI, M_WAITOK|M_ZERO); ctx_fpu = malloc(sizeof *ctx_fpu * (mp_maxid + 1), M_AESNI, M_WAITOK|M_ZERO); CPU_FOREACH(i) { ctx_fpu[i] = fpu_kern_alloc_ctx(0); mtx_init(&ctx_mtx[i], "anifpumtx", NULL, MTX_DEF|MTX_NEW); } detect_cpu_features(&sc->has_aes, &sc->has_sha); return (0); } static int aesni_detach(device_t dev) { struct aesni_softc *sc; sc = device_get_softc(dev); crypto_unregister_all(sc->cid); aesni_cleanctx(); return (0); } static bool aesni_auth_supported(struct aesni_softc *sc, const struct crypto_session_params *csp) { if (!sc->has_sha) return (false); switch (csp->csp_auth_alg) { case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case 
CRYPTO_SHA2_256_HMAC: break; default: return (false); } return (true); } static bool aesni_cipher_supported(struct aesni_softc *sc, const struct crypto_session_params *csp) { if (!sc->has_aes) return (false); switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: if (csp->csp_ivlen != AES_BLOCK_LEN) return (false); return (sc->has_aes); case CRYPTO_AES_XTS: if (csp->csp_ivlen != AES_XTS_IV_LEN) return (false); return (sc->has_aes); default: return (false); } } static int aesni_probesession(device_t dev, const struct crypto_session_params *csp) { struct aesni_softc *sc; sc = device_get_softc(dev); if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT)) != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!aesni_auth_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!aesni_cipher_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: if (csp->csp_auth_mlen != 0 && csp->csp_auth_mlen != GMAC_DIGEST_LEN) return (EINVAL); if (csp->csp_ivlen != AES_GCM_IV_LEN || !sc->has_aes) return (EINVAL); break; case CRYPTO_AES_CCM_16: if (csp->csp_auth_mlen != 0 && csp->csp_auth_mlen != AES_CBC_MAC_HASH_LEN) return (EINVAL); if (csp->csp_ivlen != AES_CCM_IV_LEN || !sc->has_aes) return (EINVAL); break; default: return (EINVAL); } break; case CSP_MODE_ETA: if (!aesni_auth_supported(sc, csp) || !aesni_cipher_supported(sc, csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_ACCEL_SOFTWARE); } static int aesni_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct aesni_softc *sc; struct aesni_session *ses; int error; sc = device_get_softc(dev); ses = crypto_get_driver_session(cses); switch (csp->csp_mode) { case CSP_MODE_DIGEST: case CSP_MODE_CIPHER: case CSP_MODE_AEAD: case CSP_MODE_ETA: break; default: return (EINVAL); } error = aesni_cipher_setup(ses, csp); if (error != 0) { CRYPTDEB("setup failed"); return (error); } return (0); } static int aesni_process(device_t dev, struct cryptop *crp, int hint __unused) { struct aesni_session *ses; int error; ses = crypto_get_driver_session(crp->crp_session); error = aesni_cipher_process(ses, crp); crp->crp_etype = error; crypto_done(crp); return (0); } static uint8_t * aesni_cipher_alloc(struct cryptop *crp, int start, int length, bool *allocated) { uint8_t *addr; addr = crypto_contiguous_subsegment(crp, start, length); if (addr != NULL) { *allocated = false; return (addr); } addr = malloc(length, M_AESNI, M_NOWAIT); if (addr != NULL) { *allocated = true; crypto_copydata(crp, start, length, addr); } else *allocated = false; return (addr); } static device_method_t aesni_methods[] = { DEVMETHOD(device_identify, aesni_identify), DEVMETHOD(device_probe, aesni_probe), DEVMETHOD(device_attach, aesni_attach), DEVMETHOD(device_detach, aesni_detach), DEVMETHOD(cryptodev_probesession, aesni_probesession), DEVMETHOD(cryptodev_newsession, aesni_newsession), DEVMETHOD(cryptodev_process, aesni_process), DEVMETHOD_END }; static driver_t aesni_driver = { "aesni", aesni_methods, sizeof(struct aesni_softc), }; static devclass_t aesni_devclass; DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, 0, 0); MODULE_VERSION(aesni, 1); MODULE_DEPEND(aesni, crypto, 1, 1, 1); static int intel_sha1_update(void *vctx, void *vdata, u_int datalen) { struct sha1_ctxt *ctx = vctx; const char *data = vdata; size_t gaplen; size_t gapstart; size_t off; size_t copysiz; u_int blocks; off = 0; /* Do any aligned 
blocks without redundant copying. */ if (datalen >= 64 && ctx->count % 64 == 0) { blocks = datalen / 64; ctx->c.b64[0] += blocks * 64 * 8; intel_sha1_step(ctx->h.b32, data + off, blocks); off += blocks * 64; } while (off < datalen) { gapstart = ctx->count % 64; gaplen = 64 - gapstart; copysiz = (gaplen < datalen - off) ? gaplen : datalen - off; bcopy(&data[off], &ctx->m.b8[gapstart], copysiz); ctx->count += copysiz; ctx->count %= 64; ctx->c.b64[0] += copysiz * 8; if (ctx->count % 64 == 0) intel_sha1_step(ctx->h.b32, (void *)ctx->m.b8, 1); off += copysiz; } return (0); } static void SHA1_Init_fn(void *ctx) { sha1_init(ctx); } static void SHA1_Finalize_fn(void *digest, void *ctx) { sha1_result(ctx, digest); } static int intel_sha256_update(void *vctx, void *vdata, u_int len) { SHA256_CTX *ctx = vctx; uint64_t bitlen; uint32_t r; u_int blocks; const unsigned char *src = vdata; /* Number of bytes left in the buffer from previous updates */ r = (ctx->count >> 3) & 0x3f; /* Convert the length into a number of bits */ bitlen = len << 3; /* Update number of bits */ ctx->count += bitlen; /* Handle the case where we don't need to perform any transforms */ if (len < 64 - r) { memcpy(&ctx->buf[r], src, len); return (0); } /* Finish the current block */ memcpy(&ctx->buf[r], src, 64 - r); intel_sha256_step(ctx->state, ctx->buf, 1); src += 64 - r; len -= 64 - r; /* Perform complete blocks */ if (len >= 64) { blocks = len / 64; intel_sha256_step(ctx->state, src, blocks); src += blocks * 64; len -= blocks * 64; } /* Copy left over data into buffer */ memcpy(ctx->buf, src, len); return (0); } static void SHA224_Init_fn(void *ctx) { SHA224_Init(ctx); } static void SHA224_Finalize_fn(void *digest, void *ctx) { SHA224_Final(digest, ctx); } static void SHA256_Init_fn(void *ctx) { SHA256_Init(ctx); } static void SHA256_Finalize_fn(void *digest, void *ctx) { SHA256_Final(digest, ctx); } static int aesni_authprepare(struct aesni_session *ses, int klen) { if (klen > SHA1_BLOCK_LEN) return (EINVAL); if ((ses->hmac && klen == 0) || (!ses->hmac && klen != 0)) return (EINVAL); return (0); } static int aesni_cipherprepare(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_AES_ICM: case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: case CRYPTO_AES_CBC: switch (csp->csp_cipher_klen * 8) { case 128: case 192: case 256: break; default: CRYPTDEB("invalid CBC/ICM/GCM key length"); return (EINVAL); } break; case CRYPTO_AES_XTS: switch (csp->csp_cipher_klen * 8) { case 256: case 512: break; default: CRYPTDEB("invalid XTS key length"); return (EINVAL); } break; default: return (EINVAL); } return (0); } static int aesni_cipher_setup(struct aesni_session *ses, const struct crypto_session_params *csp) { struct fpu_kern_ctx *ctx; int kt, ctxidx, error; switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: ses->hmac = true; /* FALLTHROUGH */ case CRYPTO_SHA1: ses->hash_len = SHA1_HASH_LEN; ses->hash_init = SHA1_Init_fn; ses->hash_update = intel_sha1_update; ses->hash_finalize = SHA1_Finalize_fn; break; case CRYPTO_SHA2_224_HMAC: ses->hmac = true; /* FALLTHROUGH */ case CRYPTO_SHA2_224: ses->hash_len = SHA2_224_HASH_LEN; ses->hash_init = SHA224_Init_fn; ses->hash_update = intel_sha256_update; ses->hash_finalize = SHA224_Finalize_fn; break; case CRYPTO_SHA2_256_HMAC: ses->hmac = true; /* FALLTHROUGH */ case CRYPTO_SHA2_256: ses->hash_len = SHA2_256_HASH_LEN; ses->hash_init = SHA256_Init_fn; ses->hash_update = intel_sha256_update; ses->hash_finalize = SHA256_Finalize_fn; break; } if (ses->hash_len != 
0) { if (csp->csp_auth_mlen == 0) ses->mlen = ses->hash_len; else ses->mlen = csp->csp_auth_mlen; error = aesni_authprepare(ses, csp->csp_auth_klen); if (error != 0) return (error); } error = aesni_cipherprepare(csp); if (error != 0) return (error); kt = is_fpu_kern_thread(0) || (csp->csp_cipher_alg == 0); if (!kt) { ACQUIRE_CTX(ctxidx, ctx); fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); } error = 0; if (csp->csp_cipher_key != NULL) aesni_cipher_setup_common(ses, csp, csp->csp_cipher_key, csp->csp_cipher_klen); if (!kt) { fpu_kern_leave(curthread, ctx); RELEASE_CTX(ctxidx, ctx); } return (error); } static int aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp) { const struct crypto_session_params *csp; struct fpu_kern_ctx *ctx; int error, ctxidx; bool kt; csp = crypto_get_params(crp->crp_session); switch (csp->csp_cipher_alg) { case CRYPTO_AES_ICM: case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); break; case CRYPTO_AES_CBC: case CRYPTO_AES_XTS: /* CBC & XTS can only handle full blocks for now */ if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0) return (EINVAL); break; } ctx = NULL; ctxidx = 0; error = 0; kt = is_fpu_kern_thread(0); if (!kt) { ACQUIRE_CTX(ctxidx, ctx); fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); } /* Do work */ if (csp->csp_mode == CSP_MODE_ETA) { if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { error = aesni_cipher_crypt(ses, crp, csp); if (error == 0) error = aesni_cipher_mac(ses, crp, csp); } else { error = aesni_cipher_mac(ses, crp, csp); if (error == 0) error = aesni_cipher_crypt(ses, crp, csp); } } else if (csp->csp_mode == CSP_MODE_DIGEST) error = aesni_cipher_mac(ses, crp, csp); else error = aesni_cipher_crypt(ses, crp, csp); if (!kt) { fpu_kern_leave(curthread, ctx); RELEASE_CTX(ctxidx, ctx); } return (error); } static int aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp, const struct crypto_session_params *csp) { uint8_t iv[AES_BLOCK_LEN], tag[GMAC_DIGEST_LEN]; uint8_t *authbuf, *buf, *outbuf; int error; bool encflag, allocated, authallocated, outallocated, outcopy; buf = aesni_cipher_alloc(crp, crp->crp_payload_start, crp->crp_payload_length, &allocated); if (buf == NULL) return (ENOMEM); outallocated = false; authallocated = false; authbuf = NULL; if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16 || csp->csp_cipher_alg == CRYPTO_AES_CCM_16) { authbuf = aesni_cipher_alloc(crp, crp->crp_aad_start, crp->crp_aad_length, &authallocated); if (authbuf == NULL) { error = ENOMEM; goto out; } } if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { outbuf = crypto_buffer_contiguous_subsegment(&crp->crp_obuf, crp->crp_payload_output_start, crp->crp_payload_length); if (outbuf == NULL) { outcopy = true; if (allocated) outbuf = buf; else { outbuf = malloc(crp->crp_payload_length, M_AESNI, M_NOWAIT); if (outbuf == NULL) { error = ENOMEM; goto out; } outallocated = true; } } else outcopy = false; } else { outbuf = buf; outcopy = allocated; } error = 0; encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op); if (crp->crp_cipher_key != NULL) aesni_cipher_setup_common(ses, csp, crp->crp_cipher_key, csp->csp_cipher_klen); crypto_read_iv(crp, iv); switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (encflag) aesni_encrypt_cbc(ses->rounds, ses->enc_schedule, crp->crp_payload_length, buf, outbuf, iv); else { if (buf != outbuf) memcpy(outbuf, buf, crp->crp_payload_length); aesni_decrypt_cbc(ses->rounds, ses->dec_schedule, crp->crp_payload_length, outbuf, iv); } break; case 
CRYPTO_AES_ICM: /* encryption & decryption are the same */ aesni_encrypt_icm(ses->rounds, ses->enc_schedule, crp->crp_payload_length, buf, outbuf, iv); break; case CRYPTO_AES_XTS: if (encflag) aesni_encrypt_xts(ses->rounds, ses->enc_schedule, ses->xts_schedule, crp->crp_payload_length, buf, outbuf, iv); else aesni_decrypt_xts(ses->rounds, ses->dec_schedule, ses->xts_schedule, crp->crp_payload_length, buf, outbuf, iv); break; case CRYPTO_AES_NIST_GCM_16: if (encflag) { memset(tag, 0, sizeof(tag)); AES_GCM_encrypt(buf, outbuf, authbuf, iv, tag, crp->crp_payload_length, crp->crp_aad_length, csp->csp_ivlen, ses->enc_schedule, ses->rounds); crypto_copyback(crp, crp->crp_digest_start, sizeof(tag), tag); } else { crypto_copydata(crp, crp->crp_digest_start, sizeof(tag), tag); if (!AES_GCM_decrypt(buf, outbuf, authbuf, iv, tag, crp->crp_payload_length, crp->crp_aad_length, csp->csp_ivlen, ses->enc_schedule, ses->rounds)) error = EBADMSG; } break; case CRYPTO_AES_CCM_16: if (encflag) { memset(tag, 0, sizeof(tag)); AES_CCM_encrypt(buf, outbuf, authbuf, iv, tag, crp->crp_payload_length, crp->crp_aad_length, csp->csp_ivlen, ses->enc_schedule, ses->rounds); crypto_copyback(crp, crp->crp_digest_start, sizeof(tag), tag); } else { crypto_copydata(crp, crp->crp_digest_start, sizeof(tag), tag); if (!AES_CCM_decrypt(buf, outbuf, authbuf, iv, tag, crp->crp_payload_length, crp->crp_aad_length, csp->csp_ivlen, ses->enc_schedule, ses->rounds)) error = EBADMSG; } break; } if (outcopy && error == 0) crypto_copyback(crp, CRYPTO_HAS_OUTPUT_BUFFER(crp) ? crp->crp_payload_output_start : crp->crp_payload_start, crp->crp_payload_length, outbuf); out: if (allocated) { explicit_bzero(buf, crp->crp_payload_length); free(buf, M_AESNI); } if (authallocated) { explicit_bzero(authbuf, crp->crp_aad_length); free(authbuf, M_AESNI); } if (outallocated) { explicit_bzero(outbuf, crp->crp_payload_length); free(outbuf, M_AESNI); } explicit_bzero(iv, sizeof(iv)); explicit_bzero(tag, sizeof(tag)); return (error); } static int aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp, const struct crypto_session_params *csp) { union { struct SHA256Context sha2 __aligned(16); struct sha1_ctxt sha1 __aligned(16); } sctx; uint32_t res[SHA2_256_HASH_LEN / sizeof(uint32_t)]; const uint8_t *key; int i, keylen; if (crp->crp_auth_key != NULL) key = crp->crp_auth_key; else key = csp->csp_auth_key; keylen = csp->csp_auth_klen; if (ses->hmac) { uint8_t hmac_key[SHA1_BLOCK_LEN] __aligned(16); /* Inner hash: (K ^ IPAD) || data */ ses->hash_init(&sctx); for (i = 0; i < keylen; i++) hmac_key[i] = key[i] ^ HMAC_IPAD_VAL; for (i = keylen; i < sizeof(hmac_key); i++) hmac_key[i] = 0 ^ HMAC_IPAD_VAL; ses->hash_update(&sctx, hmac_key, sizeof(hmac_key)); crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, ses->hash_update, &sctx); if (CRYPTO_HAS_OUTPUT_BUFFER(crp) && CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) crypto_apply_buf(&crp->crp_obuf, crp->crp_payload_output_start, crp->crp_payload_length, ses->hash_update, &sctx); else crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, ses->hash_update, &sctx); ses->hash_finalize(res, &sctx); /* Outer hash: (K ^ OPAD) || inner hash */ ses->hash_init(&sctx); for (i = 0; i < keylen; i++) hmac_key[i] = key[i] ^ HMAC_OPAD_VAL; for (i = keylen; i < sizeof(hmac_key); i++) hmac_key[i] = 0 ^ HMAC_OPAD_VAL; ses->hash_update(&sctx, hmac_key, sizeof(hmac_key)); ses->hash_update(&sctx, res, ses->hash_len); ses->hash_finalize(res, &sctx); explicit_bzero(hmac_key, sizeof(hmac_key)); } else { 
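/* Plain (non-HMAC) digest: hash the AAD and payload directly. */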
ses->hash_init(&sctx); crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, ses->hash_update, &sctx); if (CRYPTO_HAS_OUTPUT_BUFFER(crp) && CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) crypto_apply_buf(&crp->crp_obuf, crp->crp_payload_output_start, crp->crp_payload_length, ses->hash_update, &sctx); else crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, ses->hash_update, &sctx); ses->hash_finalize(res, &sctx); } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { uint32_t res2[SHA2_256_HASH_LEN / sizeof(uint32_t)]; crypto_copydata(crp, crp->crp_digest_start, ses->mlen, res2); if (timingsafe_bcmp(res, res2, ses->mlen) != 0) return (EBADMSG); explicit_bzero(res2, sizeof(res2)); } else crypto_copyback(crp, crp->crp_digest_start, ses->mlen, res); explicit_bzero(res, sizeof(res)); return (0); } diff --git a/sys/crypto/armv8/armv8_crypto.c b/sys/crypto/armv8/armv8_crypto.c index 21b3fa2f71dd..bc3e70f935b5 100644 --- a/sys/crypto/armv8/armv8_crypto.c +++ b/sys/crypto/armv8/armv8_crypto.c @@ -1,385 +1,385 @@ /*- * Copyright (c) 2005-2008 Pawel Jakub Dawidek * Copyright (c) 2010 Konstantin Belousov * Copyright (c) 2014,2016 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This is based on the aesni code. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct armv8_crypto_softc { int dieing; int32_t cid; struct rwlock lock; }; static struct mtx *ctx_mtx; static struct fpu_kern_ctx **ctx_vfp; #define AQUIRE_CTX(i, ctx) \ do { \ (i) = PCPU_GET(cpuid); \ mtx_lock(&ctx_mtx[(i)]); \ (ctx) = ctx_vfp[(i)]; \ } while (0) #define RELEASE_CTX(i, ctx) \ do { \ mtx_unlock(&ctx_mtx[(i)]); \ (i) = -1; \ (ctx) = NULL; \ } while (0) static int armv8_crypto_cipher_process(struct armv8_crypto_session *, struct cryptop *); MALLOC_DEFINE(M_ARMV8_CRYPTO, "armv8_crypto", "ARMv8 Crypto Data"); static void armv8_crypto_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "armv8crypto", -1) == NULL && BUS_ADD_CHILD(parent, 10, "armv8crypto", -1) == 0) panic("ARMv8 crypto: could not attach"); } static int armv8_crypto_probe(device_t dev) { uint64_t reg; int ret = ENXIO; reg = READ_SPECIALREG(id_aa64isar0_el1); switch (ID_AA64ISAR0_AES_VAL(reg)) { case ID_AA64ISAR0_AES_BASE: case ID_AA64ISAR0_AES_PMULL: ret = 0; break; } device_set_desc_copy(dev, "AES-CBC"); /* TODO: Check more fields as we support more features */ return (ret); } static int armv8_crypto_attach(device_t dev) { struct armv8_crypto_softc *sc; int i; sc = device_get_softc(dev); sc->dieing = 0; sc->cid = crypto_get_driverid(dev, sizeof(struct armv8_crypto_session), - CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); + CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC | CRYPTOCAP_F_ACCEL_SOFTWARE); if (sc->cid < 0) { device_printf(dev, "Could not get crypto driver id.\n"); return (ENOMEM); } rw_init(&sc->lock, "armv8crypto"); ctx_mtx = malloc(sizeof(*ctx_mtx) * (mp_maxid + 1), M_ARMV8_CRYPTO, M_WAITOK|M_ZERO); ctx_vfp = malloc(sizeof(*ctx_vfp) * (mp_maxid + 1), M_ARMV8_CRYPTO, M_WAITOK|M_ZERO); CPU_FOREACH(i) { ctx_vfp[i] = fpu_kern_alloc_ctx(0); mtx_init(&ctx_mtx[i], "armv8cryptoctx", NULL, MTX_DEF|MTX_NEW); } return (0); } static int armv8_crypto_detach(device_t dev) { struct armv8_crypto_softc *sc; int i; sc = device_get_softc(dev); rw_wlock(&sc->lock); sc->dieing = 1; rw_wunlock(&sc->lock); crypto_unregister_all(sc->cid); rw_destroy(&sc->lock); CPU_FOREACH(i) { if (ctx_vfp[i] != NULL) { mtx_destroy(&ctx_mtx[i]); fpu_kern_free_ctx(ctx_vfp[i]); } ctx_vfp[i] = NULL; } free(ctx_mtx, M_ARMV8_CRYPTO); ctx_mtx = NULL; free(ctx_vfp, M_ARMV8_CRYPTO); ctx_vfp = NULL; return (0); } static int armv8_crypto_probesession(device_t dev, const struct crypto_session_params *csp) { if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (csp->csp_ivlen != AES_BLOCK_LEN) return (EINVAL); switch (csp->csp_cipher_klen * 8) { case 128: case 192: case 256: break; default: return (EINVAL); } break; default: return (EINVAL); } default: return (EINVAL); } return (CRYPTODEV_PROBE_ACCEL_SOFTWARE); } static void armv8_crypto_cipher_setup(struct armv8_crypto_session *ses, const struct crypto_session_params *csp) { int i; switch (csp->csp_cipher_klen * 8) { case 128: ses->rounds = AES128_ROUNDS; break; case 192: ses->rounds = AES192_ROUNDS; break; case 256: ses->rounds = AES256_ROUNDS; break; default: panic("invalid CBC key length"); } rijndaelKeySetupEnc(ses->enc_schedule, csp->csp_cipher_key, csp->csp_cipher_klen * 8); rijndaelKeySetupDec(ses->dec_schedule, csp->csp_cipher_key, 
csp->csp_cipher_klen * 8); for (i = 0; i < nitems(ses->enc_schedule); i++) { ses->enc_schedule[i] = bswap32(ses->enc_schedule[i]); ses->dec_schedule[i] = bswap32(ses->dec_schedule[i]); } } static int armv8_crypto_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct armv8_crypto_softc *sc; struct armv8_crypto_session *ses; sc = device_get_softc(dev); rw_wlock(&sc->lock); if (sc->dieing) { rw_wunlock(&sc->lock); return (EINVAL); } ses = crypto_get_driver_session(cses); armv8_crypto_cipher_setup(ses, csp); rw_wunlock(&sc->lock); return (0); } static int armv8_crypto_process(device_t dev, struct cryptop *crp, int hint __unused) { struct armv8_crypto_session *ses; int error; /* We can only handle full blocks for now */ if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0) { error = EINVAL; goto out; } ses = crypto_get_driver_session(crp->crp_session); error = armv8_crypto_cipher_process(ses, crp); out: crp->crp_etype = error; crypto_done(crp); return (error); } static uint8_t * armv8_crypto_cipher_alloc(struct cryptop *crp, int *allocated) { uint8_t *addr; addr = crypto_contiguous_subsegment(crp, crp->crp_payload_start, crp->crp_payload_length); if (addr != NULL) { *allocated = 0; return (addr); } addr = malloc(crp->crp_payload_length, M_ARMV8_CRYPTO, M_NOWAIT); if (addr != NULL) { *allocated = 1; crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, addr); } else *allocated = 0; return (addr); } static int armv8_crypto_cipher_process(struct armv8_crypto_session *ses, struct cryptop *crp) { const struct crypto_session_params *csp; struct fpu_kern_ctx *ctx; uint8_t *buf; uint8_t iv[AES_BLOCK_LEN]; int allocated, i; int encflag; int kt; csp = crypto_get_params(crp->crp_session); encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op); buf = armv8_crypto_cipher_alloc(crp, &allocated); if (buf == NULL) return (ENOMEM); kt = is_fpu_kern_thread(0); if (!kt) { AQUIRE_CTX(i, ctx); fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); } if (crp->crp_cipher_key != NULL) { panic("armv8: new cipher key"); } crypto_read_iv(crp, iv); /* Do work */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (encflag) armv8_aes_encrypt_cbc(ses->rounds, ses->enc_schedule, crp->crp_payload_length, buf, buf, iv); else armv8_aes_decrypt_cbc(ses->rounds, ses->dec_schedule, crp->crp_payload_length, buf, iv); break; } if (allocated) crypto_copyback(crp, crp->crp_payload_start, crp->crp_payload_length, buf); if (!kt) { fpu_kern_leave(curthread, ctx); RELEASE_CTX(i, ctx); } if (allocated) { bzero(buf, crp->crp_payload_length); free(buf, M_ARMV8_CRYPTO); } return (0); } static device_method_t armv8_crypto_methods[] = { DEVMETHOD(device_identify, armv8_crypto_identify), DEVMETHOD(device_probe, armv8_crypto_probe), DEVMETHOD(device_attach, armv8_crypto_attach), DEVMETHOD(device_detach, armv8_crypto_detach), DEVMETHOD(cryptodev_probesession, armv8_crypto_probesession), DEVMETHOD(cryptodev_newsession, armv8_crypto_newsession), DEVMETHOD(cryptodev_process, armv8_crypto_process), DEVMETHOD_END, }; static DEFINE_CLASS_0(armv8crypto, armv8_crypto_driver, armv8_crypto_methods, sizeof(struct armv8_crypto_softc)); static devclass_t armv8_crypto_devclass; DRIVER_MODULE(armv8crypto, nexus, armv8_crypto_driver, armv8_crypto_devclass, 0, 0); diff --git a/sys/crypto/blake2/blake2_cryptodev.c b/sys/crypto/blake2/blake2_cryptodev.c index 262823b5a758..065f53734e54 100644 --- a/sys/crypto/blake2/blake2_cryptodev.c +++ b/sys/crypto/blake2/blake2_cryptodev.c @@ -1,412 +1,413 @@ /*- * 
Copyright (c) 2018 Conrad Meyer * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__amd64__) #include #elif defined(__i386__) #include #endif struct blake2_session { size_t mlen; }; CTASSERT((size_t)BLAKE2B_KEYBYTES > (size_t)BLAKE2S_KEYBYTES); struct blake2_softc { bool dying; int32_t cid; struct rwlock lock; }; static struct mtx_padalign *ctx_mtx; static struct fpu_kern_ctx **ctx_fpu; #define ACQUIRE_CTX(i, ctx) \ do { \ (i) = PCPU_GET(cpuid); \ mtx_lock(&ctx_mtx[(i)]); \ (ctx) = ctx_fpu[(i)]; \ } while (0) #define RELEASE_CTX(i, ctx) \ do { \ mtx_unlock(&ctx_mtx[(i)]); \ (i) = -1; \ (ctx) = NULL; \ } while (0) static int blake2_cipher_setup(struct blake2_session *ses, const struct crypto_session_params *csp); static int blake2_cipher_process(struct blake2_session *ses, struct cryptop *crp); MALLOC_DEFINE(M_BLAKE2, "blake2_data", "Blake2 Data"); static void blake2_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "blaketwo", -1) == NULL && BUS_ADD_CHILD(parent, 10, "blaketwo", -1) == 0) panic("blaketwo: could not attach"); } static int blake2_probe(device_t dev) { device_set_desc(dev, "Blake2"); return (0); } static void blake2_cleanctx(void) { int i; /* XXX - no way to return driverid */ CPU_FOREACH(i) { if (ctx_fpu[i] != NULL) { mtx_destroy(&ctx_mtx[i]); fpu_kern_free_ctx(ctx_fpu[i]); } ctx_fpu[i] = NULL; } free(ctx_mtx, M_BLAKE2); ctx_mtx = NULL; free(ctx_fpu, M_BLAKE2); ctx_fpu = NULL; } static int blake2_attach(device_t dev) { struct blake2_softc *sc; int i; sc = device_get_softc(dev); sc->dying = false; sc->cid = crypto_get_driverid(dev, sizeof(struct blake2_session), - CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); + CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC | + CRYPTOCAP_F_ACCEL_SOFTWARE); if (sc->cid < 0) { device_printf(dev, "Could not get crypto driver id.\n"); return (ENOMEM); } ctx_mtx = malloc(sizeof(*ctx_mtx) * (mp_maxid + 1), M_BLAKE2, M_WAITOK | M_ZERO); ctx_fpu = malloc(sizeof(*ctx_fpu) * (mp_maxid + 1), M_BLAKE2, M_WAITOK | M_ZERO); CPU_FOREACH(i) { ctx_fpu[i] = fpu_kern_alloc_ctx(0); mtx_init(&ctx_mtx[i], "bl2fpumtx", NULL, 
MTX_DEF | MTX_NEW); } rw_init(&sc->lock, "blake2_lock"); return (0); } static int blake2_detach(device_t dev) { struct blake2_softc *sc; sc = device_get_softc(dev); rw_wlock(&sc->lock); sc->dying = true; rw_wunlock(&sc->lock); crypto_unregister_all(sc->cid); rw_destroy(&sc->lock); blake2_cleanctx(); return (0); } static int blake2_probesession(device_t dev, const struct crypto_session_params *csp) { if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: switch (csp->csp_auth_alg) { case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: break; default: return (EINVAL); } break; default: return (EINVAL); } return (CRYPTODEV_PROBE_ACCEL_SOFTWARE); } static int blake2_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct blake2_softc *sc; struct blake2_session *ses; int error; sc = device_get_softc(dev); ses = crypto_get_driver_session(cses); rw_rlock(&sc->lock); if (sc->dying) { rw_runlock(&sc->lock); return (EINVAL); } rw_runlock(&sc->lock); error = blake2_cipher_setup(ses, csp); if (error != 0) { CRYPTDEB("setup failed"); return (error); } return (0); } static int blake2_process(device_t dev, struct cryptop *crp, int hint __unused) { struct blake2_session *ses; int error; ses = crypto_get_driver_session(crp->crp_session); error = blake2_cipher_process(ses, crp); crp->crp_etype = error; crypto_done(crp); return (0); } static device_method_t blake2_methods[] = { DEVMETHOD(device_identify, blake2_identify), DEVMETHOD(device_probe, blake2_probe), DEVMETHOD(device_attach, blake2_attach), DEVMETHOD(device_detach, blake2_detach), DEVMETHOD(cryptodev_probesession, blake2_probesession), DEVMETHOD(cryptodev_newsession, blake2_newsession), DEVMETHOD(cryptodev_process, blake2_process), DEVMETHOD_END }; static driver_t blake2_driver = { "blaketwo", blake2_methods, sizeof(struct blake2_softc), }; static devclass_t blake2_devclass; DRIVER_MODULE(blake2, nexus, blake2_driver, blake2_devclass, 0, 0); MODULE_VERSION(blake2, 1); MODULE_DEPEND(blake2, crypto, 1, 1, 1); static bool blake2_check_klen(const struct crypto_session_params *csp, unsigned klen) { if (csp->csp_auth_alg == CRYPTO_BLAKE2S) return (klen <= BLAKE2S_KEYBYTES); else return (klen <= BLAKE2B_KEYBYTES); } static int blake2_cipher_setup(struct blake2_session *ses, const struct crypto_session_params *csp) { int hashlen; CTASSERT((size_t)BLAKE2S_OUTBYTES <= (size_t)BLAKE2B_OUTBYTES); if (!blake2_check_klen(csp, csp->csp_auth_klen)) return (EINVAL); if (csp->csp_auth_mlen < 0) return (EINVAL); switch (csp->csp_auth_alg) { case CRYPTO_BLAKE2S: hashlen = BLAKE2S_OUTBYTES; break; case CRYPTO_BLAKE2B: hashlen = BLAKE2B_OUTBYTES; break; default: return (EINVAL); } if (csp->csp_auth_mlen > hashlen) return (EINVAL); if (csp->csp_auth_mlen == 0) ses->mlen = hashlen; else ses->mlen = csp->csp_auth_mlen; return (0); } static int blake2b_applicator(void *state, void *buf, u_int len) { int rc; rc = blake2b_update(state, buf, len); if (rc != 0) return (EINVAL); return (0); } static int blake2s_applicator(void *state, void *buf, u_int len) { int rc; rc = blake2s_update(state, buf, len); if (rc != 0) return (EINVAL); return (0); } static int blake2_cipher_process(struct blake2_session *ses, struct cryptop *crp) { union { blake2b_state sb; blake2s_state ss; } bctx; char res[BLAKE2B_OUTBYTES], res2[BLAKE2B_OUTBYTES]; const struct crypto_session_params *csp; struct fpu_kern_ctx *ctx; const void *key; int ctxidx; bool kt; int error, rc; unsigned klen; ctx = NULL; ctxidx = 0; error = EINVAL; kt = 
is_fpu_kern_thread(0); if (!kt) { ACQUIRE_CTX(ctxidx, ctx); fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); } csp = crypto_get_params(crp->crp_session); if (crp->crp_auth_key != NULL) key = crp->crp_auth_key; else key = csp->csp_auth_key; klen = csp->csp_auth_klen; switch (csp->csp_auth_alg) { case CRYPTO_BLAKE2B: if (klen > 0) rc = blake2b_init_key(&bctx.sb, ses->mlen, key, klen); else rc = blake2b_init(&bctx.sb, ses->mlen); if (rc != 0) goto out; error = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, blake2b_applicator, &bctx.sb); if (error != 0) goto out; rc = blake2b_final(&bctx.sb, res, ses->mlen); if (rc != 0) { error = EINVAL; goto out; } break; case CRYPTO_BLAKE2S: if (klen > 0) rc = blake2s_init_key(&bctx.ss, ses->mlen, key, klen); else rc = blake2s_init(&bctx.ss, ses->mlen); if (rc != 0) goto out; error = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, blake2s_applicator, &bctx.ss); if (error != 0) goto out; rc = blake2s_final(&bctx.ss, res, ses->mlen); if (rc != 0) { error = EINVAL; goto out; } break; default: panic("unreachable"); } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, ses->mlen, res2); if (timingsafe_bcmp(res, res2, ses->mlen) != 0) return (EBADMSG); } else crypto_copyback(crp, crp->crp_digest_start, ses->mlen, res); out: if (!kt) { fpu_kern_leave(curthread, ctx); RELEASE_CTX(ctxidx, ctx); } return (error); } diff --git a/sys/crypto/via/padlock.c b/sys/crypto/via/padlock.c index e0cd452bc431..2b26b14c6461 100644 --- a/sys/crypto/via/padlock.c +++ b/sys/crypto/via/padlock.c @@ -1,304 +1,305 @@ /*- * Copyright (c) 2005-2008 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #if defined(__amd64__) || defined(__i386__) #include #include #include #include #endif #include #include #include #include #include "cryptodev_if.h" /* * Technical documentation about the PadLock engine can be found here: * * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf */ struct padlock_softc { int32_t sc_cid; }; static int padlock_probesession(device_t, const struct crypto_session_params *); static int padlock_newsession(device_t, crypto_session_t cses, const struct crypto_session_params *); static void padlock_freesession(device_t, crypto_session_t cses); static void padlock_freesession_one(struct padlock_softc *sc, struct padlock_session *ses); static int padlock_process(device_t, struct cryptop *crp, int hint __unused); MALLOC_DEFINE(M_PADLOCK, "padlock_data", "PadLock Data"); static void padlock_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "padlock", -1) == NULL && BUS_ADD_CHILD(parent, 10, "padlock", -1) == 0) panic("padlock: could not attach"); } static int padlock_probe(device_t dev) { char capp[256]; #if defined(__amd64__) || defined(__i386__) /* If there is no AES support, we has nothing to do here. */ if (!(via_feature_xcrypt & VIA_HAS_AES)) { device_printf(dev, "No ACE support.\n"); return (EINVAL); } strlcpy(capp, "AES-CBC", sizeof(capp)); #if 0 strlcat(capp, ",AES-EBC", sizeof(capp)); strlcat(capp, ",AES-CFB", sizeof(capp)); strlcat(capp, ",AES-OFB", sizeof(capp)); #endif if (via_feature_xcrypt & VIA_HAS_SHA) { strlcat(capp, ",SHA1", sizeof(capp)); strlcat(capp, ",SHA256", sizeof(capp)); } #if 0 if (via_feature_xcrypt & VIA_HAS_AESCTR) strlcat(capp, ",AES-CTR", sizeof(capp)); if (via_feature_xcrypt & VIA_HAS_MM) strlcat(capp, ",RSA", sizeof(capp)); #endif device_set_desc_copy(dev, capp); return (0); #else return (EINVAL); #endif } static int padlock_attach(device_t dev) { struct padlock_softc *sc = device_get_softc(dev); sc->sc_cid = crypto_get_driverid(dev, sizeof(struct padlock_session), - CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); + CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC | + CRYPTOCAP_F_ACCEL_SOFTWARE); if (sc->sc_cid < 0) { device_printf(dev, "Could not get crypto driver id.\n"); return (ENOMEM); } return (0); } static int padlock_detach(device_t dev) { struct padlock_softc *sc = device_get_softc(dev); crypto_unregister_all(sc->sc_cid); return (0); } static int padlock_probesession(device_t dev, const struct crypto_session_params *csp) { if (csp->csp_flags != 0) return (EINVAL); /* * We only support HMAC algorithms to be able to work with * ipsec(4), so if we are asked only for authentication without * encryption, don't pretend we can accelerate it. * * XXX: For CPUs with SHA instructions we should probably * permit CSP_MODE_DIGEST so that those can be tested. 
*/ switch (csp->csp_mode) { case CSP_MODE_ETA: if (!padlock_hash_check(csp)) return (EINVAL); /* FALLTHROUGH */ case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (csp->csp_ivlen != AES_BLOCK_LEN) return (EINVAL); break; default: return (EINVAL); } break; default: return (EINVAL); } return (CRYPTODEV_PROBE_ACCEL_SOFTWARE); } static int padlock_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct padlock_softc *sc = device_get_softc(dev); struct padlock_session *ses = NULL; struct thread *td; int error; ses = crypto_get_driver_session(cses); ses->ses_fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL); error = padlock_cipher_setup(ses, csp); if (error != 0) { padlock_freesession_one(sc, ses); return (error); } if (csp->csp_mode == CSP_MODE_ETA) { td = curthread; fpu_kern_enter(td, ses->ses_fpu_ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); error = padlock_hash_setup(ses, csp); fpu_kern_leave(td, ses->ses_fpu_ctx); if (error != 0) { padlock_freesession_one(sc, ses); return (error); } } return (0); } static void padlock_freesession_one(struct padlock_softc *sc, struct padlock_session *ses) { padlock_hash_free(ses); fpu_kern_free_ctx(ses->ses_fpu_ctx); } static void padlock_freesession(device_t dev, crypto_session_t cses) { struct padlock_softc *sc = device_get_softc(dev); struct padlock_session *ses; ses = crypto_get_driver_session(cses); padlock_freesession_one(sc, ses); } static int padlock_process(device_t dev, struct cryptop *crp, int hint __unused) { const struct crypto_session_params *csp; struct padlock_session *ses; int error; if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0) { error = EINVAL; goto out; } ses = crypto_get_driver_session(crp->crp_session); csp = crypto_get_params(crp->crp_session); /* Perform data authentication if requested before decryption. */ if (csp->csp_mode == CSP_MODE_ETA && !CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { error = padlock_hash_process(ses, crp, csp); if (error != 0) goto out; } error = padlock_cipher_process(ses, crp, csp); if (error != 0) goto out; /* Perform data authentication if requested after encryption. */ if (csp->csp_mode == CSP_MODE_ETA && CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { error = padlock_hash_process(ses, crp, csp); if (error != 0) goto out; } out: #if 0 /* * This code is not necessary, because contexts will be freed on next * padlock_setup_mackey() call or at padlock_freesession() call. 
*/ if (ses != NULL && maccrd != NULL && (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) { padlock_free_ctx(ses->ses_axf, ses->ses_ictx); padlock_free_ctx(ses->ses_axf, ses->ses_octx); } #endif crp->crp_etype = error; crypto_done(crp); return (error); } static device_method_t padlock_methods[] = { DEVMETHOD(device_identify, padlock_identify), DEVMETHOD(device_probe, padlock_probe), DEVMETHOD(device_attach, padlock_attach), DEVMETHOD(device_detach, padlock_detach), DEVMETHOD(cryptodev_probesession, padlock_probesession), DEVMETHOD(cryptodev_newsession, padlock_newsession), DEVMETHOD(cryptodev_freesession,padlock_freesession), DEVMETHOD(cryptodev_process, padlock_process), {0, 0}, }; static driver_t padlock_driver = { "padlock", padlock_methods, sizeof(struct padlock_softc), }; static devclass_t padlock_devclass; /* XXX where to attach */ DRIVER_MODULE(padlock, nexus, padlock_driver, padlock_devclass, 0, 0); MODULE_VERSION(padlock, 1); MODULE_DEPEND(padlock, crypto, 1, 1, 1); diff --git a/sys/geom/eli/g_eli.c b/sys/geom/eli/g_eli.c index 2a7076c0fd28..8e7433f23594 100644 --- a/sys/geom/eli/g_eli.c +++ b/sys/geom/eli/g_eli.c @@ -1,1451 +1,1457 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005-2019 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include FEATURE(geom_eli, "GEOM crypto module"); MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data"); SYSCTL_DECL(_kern_geom); SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "GEOM_ELI stuff"); static int g_eli_version = G_ELI_VERSION; SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0, "GELI version"); int g_eli_debug = 0; SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RWTUN, &g_eli_debug, 0, "Debug level"); static u_int g_eli_tries = 3; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RWTUN, &g_eli_tries, 0, "Number of tries for entering the passphrase"); static u_int g_eli_visible_passphrase = GETS_NOECHO; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RWTUN, &g_eli_visible_passphrase, 0, "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)"); u_int g_eli_overwrites = G_ELI_OVERWRITES; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RWTUN, &g_eli_overwrites, 0, "Number of times on-disk keys should be overwritten when destroying them"); static u_int g_eli_threads = 0; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RWTUN, &g_eli_threads, 0, "Number of threads doing crypto work"); u_int g_eli_batch = 0; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RWTUN, &g_eli_batch, 0, "Use crypto operations batching"); /* * Passphrase cached during boot, in order to be more user-friendly if * there are multiple providers using the same passphrase. */ static char cached_passphrase[256]; static u_int g_eli_boot_passcache = 1; TUNABLE_INT("kern.geom.eli.boot_passcache", &g_eli_boot_passcache); SYSCTL_UINT(_kern_geom_eli, OID_AUTO, boot_passcache, CTLFLAG_RD, &g_eli_boot_passcache, 0, "Passphrases are cached during boot process for possible reuse"); static void fetch_loader_passphrase(void * dummy) { char * env_passphrase; KASSERT(dynamic_kenv, ("need dynamic kenv")); if ((env_passphrase = kern_getenv("kern.geom.eli.passphrase")) != NULL) { /* Extract passphrase from the environment. */ strlcpy(cached_passphrase, env_passphrase, sizeof(cached_passphrase)); freeenv(env_passphrase); /* Wipe the passphrase from the environment. */ kern_unsetenv("kern.geom.eli.passphrase"); } } SYSINIT(geli_fetch_loader_passphrase, SI_SUB_KMEM + 1, SI_ORDER_ANY, fetch_loader_passphrase, NULL); static void zero_boot_passcache(void) { explicit_bzero(cached_passphrase, sizeof(cached_passphrase)); } static void zero_geli_intake_keys(void) { struct keybuf *keybuf; int i; if ((keybuf = get_keybuf()) != NULL) { /* Scan the key buffer, clear all GELI keys. 
*/ for (i = 0; i < keybuf->kb_nents; i++) { if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) { explicit_bzero(keybuf->kb_ents[i].ke_data, sizeof(keybuf->kb_ents[i].ke_data)); keybuf->kb_ents[i].ke_type = KEYBUF_TYPE_NONE; } } } } static void zero_intake_passcache(void *dummy) { zero_boot_passcache(); zero_geli_intake_keys(); } EVENTHANDLER_DEFINE(mountroot, zero_intake_passcache, NULL, 0); static eventhandler_tag g_eli_pre_sync = NULL; static int g_eli_read_metadata_offset(struct g_class *mp, struct g_provider *pp, off_t offset, struct g_eli_metadata *md); static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp); static void g_eli_init(struct g_class *mp); static void g_eli_fini(struct g_class *mp); static g_taste_t g_eli_taste; static g_dumpconf_t g_eli_dumpconf; struct g_class g_eli_class = { .name = G_ELI_CLASS_NAME, .version = G_VERSION, .ctlreq = g_eli_config, .taste = g_eli_taste, .destroy_geom = g_eli_destroy_geom, .init = g_eli_init, .fini = g_eli_fini }; /* * Code paths: * BIO_READ: * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ /* * EAGAIN from crypto(9) means, that we were probably balanced to another crypto * accelerator or something like this. * The function updates the SID and rerun the operation. */ int g_eli_crypto_rerun(struct cryptop *crp) { struct g_eli_softc *sc; struct g_eli_worker *wr; struct bio *bp; int error; bp = (struct bio *)crp->crp_opaque; sc = bp->bio_to->geom->softc; LIST_FOREACH(wr, &sc->sc_workers, w_next) { if (wr->w_number == bp->bio_pflags) break; } KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags)); G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %p -> %p).", bp->bio_cmd == BIO_READ ? "READ" : "WRITE", wr->w_sid, crp->crp_session); wr->w_sid = crp->crp_session; crp->crp_etype = 0; error = crypto_dispatch(crp); if (error == 0) return (0); G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error); crp->crp_etype = error; return (error); } static void g_eli_getattr_done(struct bio *bp) { if (bp->bio_error == 0 && !strcmp(bp->bio_attribute, "GEOM::physpath")) { strlcat(bp->bio_data, "/eli", bp->bio_length); } g_std_done(bp); } /* * The function is called afer reading encrypted data from the provider. * * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver */ void g_eli_read_done(struct bio *bp) { struct g_eli_softc *sc; struct bio *pbp; G_ELI_LOGREQ(2, bp, "Request done."); pbp = bp->bio_parent; if (pbp->bio_error == 0 && bp->bio_error != 0) pbp->bio_error = bp->bio_error; g_destroy_bio(bp); /* * Do we have all sectors already? */ pbp->bio_inbed++; if (pbp->bio_inbed < pbp->bio_children) return; sc = pbp->bio_to->geom->softc; if (pbp->bio_error != 0) { G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__, pbp->bio_error); pbp->bio_completed = 0; if (pbp->bio_driver2 != NULL) { free(pbp->bio_driver2, M_ELI); pbp->bio_driver2 = NULL; } g_io_deliver(pbp, pbp->bio_error); if (sc != NULL) atomic_subtract_int(&sc->sc_inflight, 1); return; } mtx_lock(&sc->sc_queue_mtx); bioq_insert_tail(&sc->sc_queue, pbp); mtx_unlock(&sc->sc_queue_mtx); wakeup(sc); } /* * The function is called after we encrypt and write data. 
* * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver */ void g_eli_write_done(struct bio *bp) { struct g_eli_softc *sc; struct bio *pbp; G_ELI_LOGREQ(2, bp, "Request done."); pbp = bp->bio_parent; if (pbp->bio_error == 0 && bp->bio_error != 0) pbp->bio_error = bp->bio_error; g_destroy_bio(bp); /* * Do we have all sectors already? */ pbp->bio_inbed++; if (pbp->bio_inbed < pbp->bio_children) return; free(pbp->bio_driver2, M_ELI); pbp->bio_driver2 = NULL; if (pbp->bio_error != 0) { G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__, pbp->bio_error); pbp->bio_completed = 0; } else pbp->bio_completed = pbp->bio_length; /* * Write is finished, send it up. */ sc = pbp->bio_to->geom->softc; g_io_deliver(pbp, pbp->bio_error); if (sc != NULL) atomic_subtract_int(&sc->sc_inflight, 1); } /* * This function should never be called, but GEOM made as it set ->orphan() * method for every geom. */ static void g_eli_orphan_spoil_assert(struct g_consumer *cp) { panic("Function %s() called for %s.", __func__, cp->geom->name); } static void g_eli_orphan(struct g_consumer *cp) { struct g_eli_softc *sc; g_topology_assert(); sc = cp->geom->softc; if (sc == NULL) return; g_eli_destroy(sc, TRUE); } static void g_eli_resize(struct g_consumer *cp) { struct g_eli_softc *sc; struct g_provider *epp, *pp; off_t oldsize; g_topology_assert(); sc = cp->geom->softc; if (sc == NULL) return; if ((sc->sc_flags & G_ELI_FLAG_AUTORESIZE) == 0) { G_ELI_DEBUG(0, "Autoresize is turned off, old size: %jd.", (intmax_t)sc->sc_provsize); return; } pp = cp->provider; if ((sc->sc_flags & G_ELI_FLAG_ONETIME) == 0) { struct g_eli_metadata md; u_char *sector; int error; sector = NULL; error = g_eli_read_metadata_offset(cp->geom->class, pp, sc->sc_provsize - pp->sectorsize, &md); if (error != 0) { G_ELI_DEBUG(0, "Cannot read metadata from %s (error=%d).", pp->name, error); goto iofail; } md.md_provsize = pp->mediasize; sector = malloc(pp->sectorsize, M_ELI, M_WAITOK | M_ZERO); eli_metadata_encode(&md, sector); error = g_write_data(cp, pp->mediasize - pp->sectorsize, sector, pp->sectorsize); if (error != 0) { G_ELI_DEBUG(0, "Cannot store metadata on %s (error=%d).", pp->name, error); goto iofail; } explicit_bzero(sector, pp->sectorsize); error = g_write_data(cp, sc->sc_provsize - pp->sectorsize, sector, pp->sectorsize); if (error != 0) { G_ELI_DEBUG(0, "Cannot clear old metadata from %s (error=%d).", pp->name, error); goto iofail; } iofail: explicit_bzero(&md, sizeof(md)); if (sector != NULL) { explicit_bzero(sector, pp->sectorsize); free(sector, M_ELI); } } oldsize = sc->sc_mediasize; sc->sc_mediasize = eli_mediasize(sc, pp->mediasize, pp->sectorsize); g_eli_key_resize(sc); sc->sc_provsize = pp->mediasize; epp = LIST_FIRST(&sc->sc_geom->provider); g_resize_provider(epp, sc->sc_mediasize); G_ELI_DEBUG(0, "Device %s size changed from %jd to %jd.", epp->name, (intmax_t)oldsize, (intmax_t)sc->sc_mediasize); } /* * BIO_READ: * G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver * BIO_WRITE: * G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ static void g_eli_start(struct bio *bp) { struct g_eli_softc *sc; struct g_consumer *cp; struct bio *cbp; sc = bp->bio_to->geom->softc; KASSERT(sc != NULL, ("Provider's error should be set (error=%d)(device=%s).", bp->bio_to->error, bp->bio_to->name)); G_ELI_LOGREQ(2, bp, "Request received."); switch 
(bp->bio_cmd) { case BIO_READ: case BIO_WRITE: case BIO_GETATTR: case BIO_FLUSH: case BIO_ZONE: case BIO_SPEEDUP: break; case BIO_DELETE: /* * If the user hasn't set the NODELETE flag, we just pass * it down the stack and let the layers beneath us do (or * not) whatever they do with it. If they have, we * reject it. A possible extension would be an * additional flag to take it as a hint to shred the data * with [multiple?] overwrites. */ if (!(sc->sc_flags & G_ELI_FLAG_NODELETE)) break; default: g_io_deliver(bp, EOPNOTSUPP); return; } cbp = g_clone_bio(bp); if (cbp == NULL) { g_io_deliver(bp, ENOMEM); return; } bp->bio_driver1 = cbp; bp->bio_pflags = G_ELI_NEW_BIO; switch (bp->bio_cmd) { case BIO_READ: if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) { g_eli_crypto_read(sc, bp, 0); break; } /* FALLTHROUGH */ case BIO_WRITE: mtx_lock(&sc->sc_queue_mtx); bioq_insert_tail(&sc->sc_queue, bp); mtx_unlock(&sc->sc_queue_mtx); wakeup(sc); break; case BIO_GETATTR: case BIO_FLUSH: case BIO_DELETE: case BIO_SPEEDUP: case BIO_ZONE: if (bp->bio_cmd == BIO_GETATTR) cbp->bio_done = g_eli_getattr_done; else cbp->bio_done = g_std_done; cp = LIST_FIRST(&sc->sc_geom->consumer); cbp->bio_to = cp->provider; G_ELI_LOGREQ(2, cbp, "Sending request."); g_io_request(cbp, cp); break; } } static int g_eli_newsession(struct g_eli_worker *wr) { struct g_eli_softc *sc; struct crypto_session_params csp; - int error; + uint32_t caps; + int error, new_crypto; void *key; sc = wr->w_softc; memset(&csp, 0, sizeof(csp)); csp.csp_mode = CSP_MODE_CIPHER; csp.csp_cipher_alg = sc->sc_ealgo; csp.csp_ivlen = g_eli_ivlen(sc->sc_ealgo); csp.csp_cipher_klen = sc->sc_ekeylen / 8; if (sc->sc_ealgo == CRYPTO_AES_XTS) csp.csp_cipher_klen <<= 1; if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) { key = g_eli_key_hold(sc, 0, LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize); csp.csp_cipher_key = key; } else { key = NULL; csp.csp_cipher_key = sc->sc_ekey; } if (sc->sc_flags & G_ELI_FLAG_AUTH) { csp.csp_mode = CSP_MODE_ETA; csp.csp_auth_alg = sc->sc_aalgo; csp.csp_auth_klen = G_ELI_AUTH_SECKEYLEN; } switch (sc->sc_crypto) { + case G_ELI_CRYPTO_SW_ACCEL: case G_ELI_CRYPTO_SW: error = crypto_newsession(&wr->w_sid, &csp, CRYPTOCAP_F_SOFTWARE); break; case G_ELI_CRYPTO_HW: error = crypto_newsession(&wr->w_sid, &csp, CRYPTOCAP_F_HARDWARE); break; case G_ELI_CRYPTO_UNKNOWN: error = crypto_newsession(&wr->w_sid, &csp, - CRYPTOCAP_F_HARDWARE); + CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE); if (error == 0) { + caps = crypto_ses2caps(wr->w_sid); + if (caps & CRYPTOCAP_F_HARDWARE) + new_crypto = G_ELI_CRYPTO_HW; + else if (caps & CRYPTOCAP_F_ACCEL_SOFTWARE) + new_crypto = G_ELI_CRYPTO_SW_ACCEL; + else + new_crypto = G_ELI_CRYPTO_SW; mtx_lock(&sc->sc_queue_mtx); if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN) - sc->sc_crypto = G_ELI_CRYPTO_HW; - mtx_unlock(&sc->sc_queue_mtx); - } else { - error = crypto_newsession(&wr->w_sid, &csp, - CRYPTOCAP_F_SOFTWARE); - mtx_lock(&sc->sc_queue_mtx); - if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN) - sc->sc_crypto = G_ELI_CRYPTO_SW; + sc->sc_crypto = new_crypto; mtx_unlock(&sc->sc_queue_mtx); } break; default: panic("%s: invalid condition", __func__); } if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) { if (error) g_eli_key_drop(sc, key); else wr->w_first_key = key; } return (error); } static void g_eli_freesession(struct g_eli_worker *wr) { struct g_eli_softc *sc; crypto_freesession(wr->w_sid); if (wr->w_first_key != NULL) { sc = wr->w_softc; g_eli_key_drop(sc, wr->w_first_key); wr->w_first_key = NULL; } } static void 
g_eli_cancel(struct g_eli_softc *sc) { struct bio *bp; mtx_assert(&sc->sc_queue_mtx, MA_OWNED); while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) { KASSERT(bp->bio_pflags == G_ELI_NEW_BIO, ("Not new bio when canceling (bp=%p).", bp)); g_io_deliver(bp, ENXIO); } } static struct bio * g_eli_takefirst(struct g_eli_softc *sc) { struct bio *bp; mtx_assert(&sc->sc_queue_mtx, MA_OWNED); if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND)) return (bioq_takefirst(&sc->sc_queue)); /* * Device suspended, so we skip new I/O requests. */ TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { if (bp->bio_pflags != G_ELI_NEW_BIO) break; } if (bp != NULL) bioq_remove(&sc->sc_queue, bp); return (bp); } /* * This is the main function for kernel worker thread when we don't have * hardware acceleration and we have to do cryptography in software. * Dedicated thread is needed, so we don't slow down g_up/g_down GEOM * threads with crypto work. */ static void g_eli_worker(void *arg) { struct g_eli_softc *sc; struct g_eli_worker *wr; struct bio *bp; int error; wr = arg; sc = wr->w_softc; #ifdef EARLY_AP_STARTUP MPASS(!sc->sc_cpubind || smp_started); #elif defined(SMP) /* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */ if (sc->sc_cpubind) { while (!smp_started) tsleep(wr, 0, "geli:smp", hz / 4); } #endif thread_lock(curthread); sched_prio(curthread, PUSER); if (sc->sc_cpubind) sched_bind(curthread, wr->w_number % mp_ncpus); thread_unlock(curthread); G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm); for (;;) { mtx_lock(&sc->sc_queue_mtx); again: bp = g_eli_takefirst(sc); if (bp == NULL) { if (sc->sc_flags & G_ELI_FLAG_DESTROY) { g_eli_cancel(sc); LIST_REMOVE(wr, w_next); g_eli_freesession(wr); free(wr, M_ELI); G_ELI_DEBUG(1, "Thread %s exiting.", curthread->td_proc->p_comm); wakeup(&sc->sc_workers); mtx_unlock(&sc->sc_queue_mtx); kproc_exit(0); } while (sc->sc_flags & G_ELI_FLAG_SUSPEND) { if (sc->sc_inflight > 0) { G_ELI_DEBUG(0, "inflight=%d", sc->sc_inflight); /* * We still have inflight BIOs, so * sleep and retry. */ msleep(sc, &sc->sc_queue_mtx, PRIBIO, "geli:inf", hz / 5); goto again; } /* * Suspend requested, mark the worker as * suspended and go to sleep. */ if (wr->w_active) { g_eli_freesession(wr); wr->w_active = FALSE; } wakeup(&sc->sc_workers); msleep(sc, &sc->sc_queue_mtx, PRIBIO, "geli:suspend", 0); if (!wr->w_active && !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) { error = g_eli_newsession(wr); KASSERT(error == 0, ("g_eli_newsession() failed on resume (error=%d)", error)); wr->w_active = TRUE; } goto again; } msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0); continue; } if (bp->bio_pflags == G_ELI_NEW_BIO) atomic_add_int(&sc->sc_inflight, 1); mtx_unlock(&sc->sc_queue_mtx); if (bp->bio_pflags == G_ELI_NEW_BIO) { bp->bio_pflags = 0; if (sc->sc_flags & G_ELI_FLAG_AUTH) { if (bp->bio_cmd == BIO_READ) g_eli_auth_read(sc, bp); else g_eli_auth_run(wr, bp); } else { if (bp->bio_cmd == BIO_READ) g_eli_crypto_read(sc, bp, 1); else g_eli_crypto_run(wr, bp); } } else { if (sc->sc_flags & G_ELI_FLAG_AUTH) g_eli_auth_run(wr, bp); else g_eli_crypto_run(wr, bp); } } } static int g_eli_read_metadata_offset(struct g_class *mp, struct g_provider *pp, off_t offset, struct g_eli_metadata *md) { struct g_geom *gp; struct g_consumer *cp; u_char *buf = NULL; int error; g_topology_assert(); gp = g_new_geomf(mp, "eli:taste"); gp->start = g_eli_start; gp->access = g_std_access; /* * g_eli_read_metadata() is always called from the event thread. 
* Our geom is created and destroyed in the same event, so there * could be no orphan nor spoil event in the meantime. */ gp->orphan = g_eli_orphan_spoil_assert; gp->spoiled = g_eli_orphan_spoil_assert; cp = g_new_consumer(gp); error = g_attach(cp, pp); if (error != 0) goto end; error = g_access(cp, 1, 0, 0); if (error != 0) goto end; g_topology_unlock(); buf = g_read_data(cp, offset, pp->sectorsize, &error); g_topology_lock(); if (buf == NULL) goto end; error = eli_metadata_decode(buf, md); if (error != 0) goto end; /* Metadata was read and decoded successfully. */ end: if (buf != NULL) g_free(buf); if (cp->provider != NULL) { if (cp->acr == 1) g_access(cp, -1, 0, 0); g_detach(cp); } g_destroy_consumer(cp); g_destroy_geom(gp); return (error); } int g_eli_read_metadata(struct g_class *mp, struct g_provider *pp, struct g_eli_metadata *md) { return (g_eli_read_metadata_offset(mp, pp, pp->mediasize - pp->sectorsize, md)); } /* * The function is called when we had last close on provider and user requested * to close it when this situation occur. */ static void g_eli_last_close(void *arg, int flags __unused) { struct g_geom *gp; char gpname[64]; int error; g_topology_assert(); gp = arg; strlcpy(gpname, gp->name, sizeof(gpname)); error = g_eli_destroy(gp->softc, TRUE); KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).", gpname, error)); G_ELI_DEBUG(0, "Detached %s on last close.", gpname); } int g_eli_access(struct g_provider *pp, int dr, int dw, int de) { struct g_eli_softc *sc; struct g_geom *gp; gp = pp->geom; sc = gp->softc; if (dw > 0) { if (sc->sc_flags & G_ELI_FLAG_RO) { /* Deny write attempts. */ return (EROFS); } /* Someone is opening us for write, we need to remember that. */ sc->sc_flags |= G_ELI_FLAG_WOPEN; return (0); } /* Is this the last close? */ if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0) return (0); /* * Automatically detach on last close if requested. */ if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) || (sc->sc_flags & G_ELI_FLAG_WOPEN)) { g_post_event(g_eli_last_close, gp, M_WAITOK, NULL); } return (0); } static int g_eli_cpu_is_disabled(int cpu) { #ifdef SMP return (CPU_ISSET(cpu, &hlt_cpus_mask)); #else return (0); #endif } struct g_geom * g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp, const struct g_eli_metadata *md, const u_char *mkey, int nkey) { struct g_eli_softc *sc; struct g_eli_worker *wr; struct g_geom *gp; struct g_provider *pp; struct g_consumer *cp; struct g_geom_alias *gap; u_int i, threads; int dcw, error; G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX); KASSERT(eli_metadata_crypto_supported(md), ("%s: unsupported crypto for %s", __func__, bpp->name)); gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX); sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO); gp->start = g_eli_start; /* * Spoiling can happen even though we have the provider open * exclusively, e.g. through media change events. */ gp->spoiled = g_eli_orphan; gp->orphan = g_eli_orphan; gp->resize = g_eli_resize; gp->dumpconf = g_eli_dumpconf; /* * If detach-on-last-close feature is not enabled and we don't operate * on read-only provider, we can simply use g_std_access(). 
*/ if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO)) gp->access = g_eli_access; else gp->access = g_std_access; eli_metadata_softc(sc, md, bpp->sectorsize, bpp->mediasize); sc->sc_nkey = nkey; gp->softc = sc; sc->sc_geom = gp; bioq_init(&sc->sc_queue); mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF); mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF); pp = NULL; cp = g_new_consumer(gp); error = g_attach(cp, bpp); if (error != 0) { if (req != NULL) { gctl_error(req, "Cannot attach to %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).", bpp->name, error); } goto failed; } /* * Keep provider open all the time, so we can run critical tasks, * like Master Keys deletion, without wondering if we can open * provider or not. * We don't open provider for writing only when user requested read-only * access. */ dcw = (sc->sc_flags & G_ELI_FLAG_RO) ? 0 : 1; error = g_access(cp, 1, dcw, 1); if (error != 0) { if (req != NULL) { gctl_error(req, "Cannot access %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot access %s (error=%d).", bpp->name, error); } goto failed; } /* * Remember the keys in our softc structure. */ g_eli_mkey_propagate(sc, mkey); LIST_INIT(&sc->sc_workers); threads = g_eli_threads; if (threads == 0) threads = mp_ncpus; sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus); for (i = 0; i < threads; i++) { if (g_eli_cpu_is_disabled(i)) { G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.", bpp->name, i); continue; } wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO); wr->w_softc = sc; wr->w_number = i; wr->w_active = TRUE; error = g_eli_newsession(wr); if (error != 0) { free(wr, M_ELI); if (req != NULL) { gctl_error(req, "Cannot set up crypto session " "for %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot set up crypto session " "for %s (error=%d).", bpp->name, error); } goto failed; } error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0, "g_eli[%u] %s", i, bpp->name); if (error != 0) { g_eli_freesession(wr); free(wr, M_ELI); if (req != NULL) { gctl_error(req, "Cannot create kernel thread " "for %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot create kernel thread " "for %s (error=%d).", bpp->name, error); } goto failed; } LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next); } /* * Create decrypted provider. */ pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX); pp->mediasize = sc->sc_mediasize; pp->sectorsize = sc->sc_sectorsize; LIST_FOREACH(gap, &bpp->aliases, ga_next) g_provider_add_alias(pp, "%s%s", gap->ga_alias, G_ELI_SUFFIX); g_error_provider(pp, 0); G_ELI_DEBUG(0, "Device %s created.", pp->name); G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo), sc->sc_ekeylen); if (sc->sc_flags & G_ELI_FLAG_AUTH) G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo)); G_ELI_DEBUG(0, " Crypto: %s", + sc->sc_crypto == G_ELI_CRYPTO_SW_ACCEL ? "accelerated software" : sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware"); return (gp); failed: mtx_lock(&sc->sc_queue_mtx); sc->sc_flags |= G_ELI_FLAG_DESTROY; wakeup(sc); /* * Wait for kernel threads self destruction. 
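g_eli_newsession() above now asks the framework for either a hardware or a software driver in a single crypto_newsession() call and then classifies the resulting session by querying its capabilities, which is how the "accelerated software" label printed above is chosen. A condensed sketch of that pattern for a generic consumer; apart from the crypto(9) calls and the G_ELI_CRYPTO_* constants (including the new G_ELI_CRYPTO_SW_ACCEL from g_eli.h), the names are illustrative:

#include <opencrypto/cryptodev.h>
#include <geom/eli/g_eli.h>

static int
example_classify_session(const struct crypto_session_params *csp,
    crypto_session_t *sidp, u_int *modep)
{
	uint32_t caps;
	int error;

	/* Let the framework pick the best driver of either kind. */
	error = crypto_newsession(sidp, csp,
	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
	if (error != 0)
		return (error);

	caps = crypto_ses2caps(*sidp);
	if (caps & CRYPTOCAP_F_HARDWARE)
		*modep = G_ELI_CRYPTO_HW;	/* separate co-processor */
	else if (caps & CRYPTOCAP_F_ACCEL_SOFTWARE)
		*modep = G_ELI_CRYPTO_SW_ACCEL;	/* accelerated instructions on the host CPU */
	else
		*modep = G_ELI_CRYPTO_SW;	/* plain software */
	return (0);
}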
*/ while (!LIST_EMPTY(&sc->sc_workers)) { msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO, "geli:destroy", 0); } mtx_destroy(&sc->sc_queue_mtx); if (cp->provider != NULL) { if (cp->acr == 1) g_access(cp, -1, -dcw, -1); g_detach(cp); } g_destroy_consumer(cp); g_destroy_geom(gp); g_eli_key_destroy(sc); bzero(sc, sizeof(*sc)); free(sc, M_ELI); return (NULL); } int g_eli_destroy(struct g_eli_softc *sc, boolean_t force) { struct g_geom *gp; struct g_provider *pp; g_topology_assert(); if (sc == NULL) return (ENXIO); gp = sc->sc_geom; pp = LIST_FIRST(&gp->provider); if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { if (force) { G_ELI_DEBUG(1, "Device %s is still open, so it " "cannot be definitely removed.", pp->name); sc->sc_flags |= G_ELI_FLAG_RW_DETACH; gp->access = g_eli_access; g_wither_provider(pp, ENXIO); return (EBUSY); } else { G_ELI_DEBUG(1, "Device %s is still open (r%dw%de%d).", pp->name, pp->acr, pp->acw, pp->ace); return (EBUSY); } } mtx_lock(&sc->sc_queue_mtx); sc->sc_flags |= G_ELI_FLAG_DESTROY; wakeup(sc); while (!LIST_EMPTY(&sc->sc_workers)) { msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO, "geli:destroy", 0); } mtx_destroy(&sc->sc_queue_mtx); gp->softc = NULL; g_eli_key_destroy(sc); bzero(sc, sizeof(*sc)); free(sc, M_ELI); G_ELI_DEBUG(0, "Device %s destroyed.", gp->name); g_wither_geom_close(gp, ENXIO); return (0); } static int g_eli_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused, struct g_geom *gp) { struct g_eli_softc *sc; sc = gp->softc; return (g_eli_destroy(sc, FALSE)); } static int g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider) { u_char *keyfile, *data; char *file, name[64]; size_t size; int i; for (i = 0; ; i++) { snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i); keyfile = preload_search_by_type(name); if (keyfile == NULL && i == 0) { /* * If there is only one keyfile, allow simpler name. */ snprintf(name, sizeof(name), "%s:geli_keyfile", provider); keyfile = preload_search_by_type(name); } if (keyfile == NULL) return (i); /* Return number of loaded keyfiles. */ data = preload_fetch_addr(keyfile); if (data == NULL) { G_ELI_DEBUG(0, "Cannot find key file data for %s.", name); return (0); } size = preload_fetch_size(keyfile); if (size == 0) { G_ELI_DEBUG(0, "Cannot find key file size for %s.", name); return (0); } file = preload_search_info(keyfile, MODINFO_NAME); if (file == NULL) { G_ELI_DEBUG(0, "Cannot find key file name for %s.", name); return (0); } G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file, provider, name); g_eli_crypto_hmac_update(ctx, data, size); } } static void g_eli_keyfiles_clear(const char *provider) { u_char *keyfile, *data; char name[64]; size_t size; int i; for (i = 0; ; i++) { snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i); keyfile = preload_search_by_type(name); if (keyfile == NULL) return; data = preload_fetch_addr(keyfile); size = preload_fetch_size(keyfile); if (data != NULL && size != 0) bzero(data, size); } } /* * Tasting is only made on boot. * We detect providers which should be attached before root is mounted. 
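g_eli_taste() below folds any preloaded key files and the user's passphrase into the Derived-Key and then tries it against the encrypted master-key slots. A condensed sketch of that derivation using the helpers declared in g_eli.h (pkcs5v2_genkey() is assumed to come from geom/eli/pkcs5v2.h); the retry loop, cached-passphrase handling, and error checking are omitted, and the helper name is illustrative:

static int
example_unlock(const char *provider, const char *passphrase,
    struct g_eli_metadata *md, u_char *mkey, u_int *nkeyp)
{
	struct hmac_ctx ctx;
	u_char key[G_ELI_USERKEYLEN];
	int error;

	g_eli_crypto_hmac_init(&ctx, NULL, 0);
	(void)g_eli_keyfiles_load(&ctx, provider);	/* mix in preloaded key files */
	if (md->md_iterations == 0) {
		/* Passphrase is mixed in directly, salted but not stretched. */
		g_eli_crypto_hmac_update(&ctx, md->md_salt, sizeof(md->md_salt));
		g_eli_crypto_hmac_update(&ctx, (const uint8_t *)passphrase,
		    strlen(passphrase));
	} else if (md->md_iterations > 0) {
		/* Passphrase is stretched with PKCS#5v2 first. */
		u_char dkey[G_ELI_USERKEYLEN];

		pkcs5v2_genkey(dkey, sizeof(dkey), md->md_salt,
		    sizeof(md->md_salt), passphrase, md->md_iterations);
		g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
		explicit_bzero(dkey, sizeof(dkey));
	}
	g_eli_crypto_hmac_final(&ctx, key, 0);
	/* Try the Derived-Key against each encrypted master-key slot. */
	error = g_eli_mkey_decrypt_any(md, key, mkey, nkeyp);
	explicit_bzero(key, sizeof(key));
	return (error);
}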
*/ static struct g_geom * g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) { struct g_eli_metadata md; struct g_geom *gp; struct hmac_ctx ctx; char passphrase[256]; u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN]; u_int i, nkey, nkeyfiles, tries, showpass; int error; struct keybuf *keybuf; g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); g_topology_assert(); if (root_mounted() || g_eli_tries == 0) return (NULL); G_ELI_DEBUG(3, "Tasting %s.", pp->name); error = g_eli_read_metadata(mp, pp, &md); if (error != 0) return (NULL); gp = NULL; if (strcmp(md.md_magic, G_ELI_MAGIC) != 0) return (NULL); if (md.md_version > G_ELI_VERSION) { printf("geom_eli.ko module is too old to handle %s.\n", pp->name); return (NULL); } if (md.md_provsize != pp->mediasize) return (NULL); /* Should we attach it on boot? */ if (!(md.md_flags & G_ELI_FLAG_BOOT) && !(md.md_flags & G_ELI_FLAG_GELIBOOT)) return (NULL); if (md.md_keys == 0x00) { G_ELI_DEBUG(0, "No valid keys on %s.", pp->name); return (NULL); } if (!eli_metadata_crypto_supported(&md)) { G_ELI_DEBUG(0, "%s uses invalid or unsupported algorithms\n", pp->name); return (NULL); } if (md.md_iterations == -1) { /* If there is no passphrase, we try only once. */ tries = 1; } else { /* Ask for the passphrase no more than g_eli_tries times. */ tries = g_eli_tries; } if ((keybuf = get_keybuf()) != NULL) { /* Scan the key buffer, try all GELI keys. */ for (i = 0; i < keybuf->kb_nents; i++) { if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) { memcpy(key, keybuf->kb_ents[i].ke_data, sizeof(key)); if (g_eli_mkey_decrypt_any(&md, key, mkey, &nkey) == 0 ) { explicit_bzero(key, sizeof(key)); goto have_key; } } } } for (i = 0; i <= tries; i++) { g_eli_crypto_hmac_init(&ctx, NULL, 0); /* * Load all key files. */ nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name); if (nkeyfiles == 0 && md.md_iterations == -1) { /* * No key files and no passphrase, something is * definitely wrong here. * geli(8) doesn't allow for such situation, so assume * that there was really no passphrase and in that case * key files are no properly defined in loader.conf. */ G_ELI_DEBUG(0, "Found no key files in loader.conf for %s.", pp->name); return (NULL); } /* Ask for the passphrase if defined. */ if (md.md_iterations >= 0) { /* Try first with cached passphrase. */ if (i == 0) { if (!g_eli_boot_passcache) continue; memcpy(passphrase, cached_passphrase, sizeof(passphrase)); } else { printf("Enter passphrase for %s: ", pp->name); showpass = g_eli_visible_passphrase; if ((md.md_flags & G_ELI_FLAG_GELIDISPLAYPASS) != 0) showpass = GETS_ECHOPASS; cngets(passphrase, sizeof(passphrase), showpass); memcpy(cached_passphrase, passphrase, sizeof(passphrase)); } } /* * Prepare Derived-Key from the user passphrase. */ if (md.md_iterations == 0) { g_eli_crypto_hmac_update(&ctx, md.md_salt, sizeof(md.md_salt)); g_eli_crypto_hmac_update(&ctx, passphrase, strlen(passphrase)); explicit_bzero(passphrase, sizeof(passphrase)); } else if (md.md_iterations > 0) { u_char dkey[G_ELI_USERKEYLEN]; pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt, sizeof(md.md_salt), passphrase, md.md_iterations); bzero(passphrase, sizeof(passphrase)); g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey)); explicit_bzero(dkey, sizeof(dkey)); } g_eli_crypto_hmac_final(&ctx, key, 0); /* * Decrypt Master-Key. */ error = g_eli_mkey_decrypt_any(&md, key, mkey, &nkey); bzero(key, sizeof(key)); if (error == -1) { if (i == tries) { G_ELI_DEBUG(0, "Wrong key for %s. 
No tries left.", pp->name); g_eli_keyfiles_clear(pp->name); return (NULL); } if (i > 0) { G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.", pp->name, tries - i); } /* Try again. */ continue; } else if (error > 0) { G_ELI_DEBUG(0, "Cannot decrypt Master Key for %s (error=%d).", pp->name, error); g_eli_keyfiles_clear(pp->name); return (NULL); } g_eli_keyfiles_clear(pp->name); G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name); break; } have_key: /* * We have correct key, let's attach provider. */ gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey); bzero(mkey, sizeof(mkey)); bzero(&md, sizeof(md)); if (gp == NULL) { G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name, G_ELI_SUFFIX); return (NULL); } return (gp); } static void g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp) { struct g_eli_softc *sc; g_topology_assert(); sc = gp->softc; if (sc == NULL) return; if (pp != NULL || cp != NULL) return; /* Nothing here. */ sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t)sc->sc_ekeys_total); sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t)sc->sc_ekeys_allocated); sbuf_printf(sb, "%s", indent); if (sc->sc_flags == 0) sbuf_cat(sb, "NONE"); else { int first = 1; #define ADD_FLAG(flag, name) do { \ if (sc->sc_flags & (flag)) { \ if (!first) \ sbuf_cat(sb, ", "); \ else \ first = 0; \ sbuf_cat(sb, name); \ } \ } while (0) ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND"); ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY"); ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER"); ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME"); ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT"); ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH"); ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH"); ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH"); ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN"); ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY"); ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY"); ADD_FLAG(G_ELI_FLAG_NODELETE, "NODELETE"); ADD_FLAG(G_ELI_FLAG_GELIBOOT, "GELIBOOT"); ADD_FLAG(G_ELI_FLAG_GELIDISPLAYPASS, "GELIDISPLAYPASS"); ADD_FLAG(G_ELI_FLAG_AUTORESIZE, "AUTORESIZE"); #undef ADD_FLAG } sbuf_cat(sb, "\n"); if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) { sbuf_printf(sb, "%s%u\n", indent, sc->sc_nkey); } sbuf_printf(sb, "%s%u\n", indent, sc->sc_version); sbuf_printf(sb, "%s", indent); switch (sc->sc_crypto) { case G_ELI_CRYPTO_HW: sbuf_cat(sb, "hardware"); break; case G_ELI_CRYPTO_SW: sbuf_cat(sb, "software"); break; + case G_ELI_CRYPTO_SW_ACCEL: + sbuf_cat(sb, "accelerated software"); + break; default: sbuf_cat(sb, "UNKNOWN"); break; } sbuf_cat(sb, "\n"); if (sc->sc_flags & G_ELI_FLAG_AUTH) { sbuf_printf(sb, "%s%s\n", indent, g_eli_algo2str(sc->sc_aalgo)); } sbuf_printf(sb, "%s%u\n", indent, sc->sc_ekeylen); sbuf_printf(sb, "%s%s\n", indent, g_eli_algo2str(sc->sc_ealgo)); sbuf_printf(sb, "%s%s\n", indent, (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE"); } static void g_eli_shutdown_pre_sync(void *arg, int howto) { struct g_class *mp; struct g_geom *gp, *gp2; struct g_provider *pp; struct g_eli_softc *sc; int error; mp = arg; g_topology_lock(); LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { sc = gp->softc; if (sc == NULL) continue; pp = LIST_FIRST(&gp->provider); KASSERT(pp != NULL, ("No provider? 
gp=%p (%s)", gp, gp->name)); if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0 || SCHEDULER_STOPPED()) { sc->sc_flags |= G_ELI_FLAG_RW_DETACH; gp->access = g_eli_access; } else { error = g_eli_destroy(sc, TRUE); } } g_topology_unlock(); } static void g_eli_init(struct g_class *mp) { g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync, g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST); if (g_eli_pre_sync == NULL) G_ELI_DEBUG(0, "Warning! Cannot register shutdown event."); } static void g_eli_fini(struct g_class *mp) { if (g_eli_pre_sync != NULL) EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync); } DECLARE_GEOM_CLASS(g_eli_class, g_eli); MODULE_DEPEND(g_eli, crypto, 1, 1, 1); MODULE_VERSION(geom_eli, 0); diff --git a/sys/geom/eli/g_eli.h b/sys/geom/eli/g_eli.h index 1853aa17dbc1..e66bdaf409b6 100644 --- a/sys/geom/eli/g_eli.h +++ b/sys/geom/eli/g_eli.h @@ -1,739 +1,740 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005-2019 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _G_ELI_H_ #define _G_ELI_H_ #include #include #include #include #include #include #ifdef _KERNEL #include #include #include #include #include #include #else #include #include #include #include #endif #include #include #ifndef _OpenSSL_ #include #endif #define G_ELI_CLASS_NAME "ELI" #define G_ELI_MAGIC "GEOM::ELI" #define G_ELI_SUFFIX ".eli" /* * Version history: * 0 - Initial version number. * 1 - Added data authentication support (md_aalgo field and * G_ELI_FLAG_AUTH flag). * 2 - Added G_ELI_FLAG_READONLY. * 3 - Added 'configure' subcommand. * 4 - IV is generated from offset converted to little-endian * (the G_ELI_FLAG_NATIVE_BYTE_ORDER flag will be set for older versions). * 5 - Added multiple encrypton keys and AES-XTS support. * 6 - Fixed usage of multiple keys for authenticated providers (the * G_ELI_FLAG_FIRST_KEY flag will be set for older versions). * 7 - Encryption keys are now generated from the Data Key and not from the * IV Key (the G_ELI_FLAG_ENC_IVKEY flag will be set for older versions). 
*/ #define G_ELI_VERSION_00 0 #define G_ELI_VERSION_01 1 #define G_ELI_VERSION_02 2 #define G_ELI_VERSION_03 3 #define G_ELI_VERSION_04 4 #define G_ELI_VERSION_05 5 #define G_ELI_VERSION_06 6 #define G_ELI_VERSION_07 7 #define G_ELI_VERSION G_ELI_VERSION_07 /* ON DISK FLAGS. */ /* Use random, onetime keys. */ #define G_ELI_FLAG_ONETIME 0x00000001 /* Ask for the passphrase from the kernel, before mounting root. */ #define G_ELI_FLAG_BOOT 0x00000002 /* Detach on last close, if we were open for writing. */ #define G_ELI_FLAG_WO_DETACH 0x00000004 /* Detach on last close. */ #define G_ELI_FLAG_RW_DETACH 0x00000008 /* Provide data authentication. */ #define G_ELI_FLAG_AUTH 0x00000010 /* Provider is read-only, we should deny all write attempts. */ #define G_ELI_FLAG_RO 0x00000020 /* Don't pass through BIO_DELETE requests. */ #define G_ELI_FLAG_NODELETE 0x00000040 /* This GELI supports GELIBoot */ #define G_ELI_FLAG_GELIBOOT 0x00000080 /* Hide passphrase length in GELIboot. */ #define G_ELI_FLAG_GELIDISPLAYPASS 0x00000100 /* Expand provider automatically. */ #define G_ELI_FLAG_AUTORESIZE 0x00000200 /* RUNTIME FLAGS. */ /* Provider was open for writing. */ #define G_ELI_FLAG_WOPEN 0x00010000 /* Destroy device. */ #define G_ELI_FLAG_DESTROY 0x00020000 /* Provider uses native byte-order for IV generation. */ #define G_ELI_FLAG_NATIVE_BYTE_ORDER 0x00040000 /* Provider uses single encryption key. */ #define G_ELI_FLAG_SINGLE_KEY 0x00080000 /* Device suspended. */ #define G_ELI_FLAG_SUSPEND 0x00100000 /* Provider uses first encryption key. */ #define G_ELI_FLAG_FIRST_KEY 0x00200000 /* Provider uses IV-Key for encryption key generation. */ #define G_ELI_FLAG_ENC_IVKEY 0x00400000 #define G_ELI_NEW_BIO 255 #define SHA512_MDLEN 64 #define G_ELI_AUTH_SECKEYLEN SHA256_DIGEST_LENGTH #define G_ELI_MAXMKEYS 2 #define G_ELI_MAXKEYLEN 64 #define G_ELI_USERKEYLEN G_ELI_MAXKEYLEN #define G_ELI_DATAKEYLEN G_ELI_MAXKEYLEN #define G_ELI_AUTHKEYLEN G_ELI_MAXKEYLEN #define G_ELI_IVKEYLEN G_ELI_MAXKEYLEN #define G_ELI_SALTLEN 64 #define G_ELI_DATAIVKEYLEN (G_ELI_DATAKEYLEN + G_ELI_IVKEYLEN) /* Data-Key, IV-Key, HMAC_SHA512(Derived-Key, Data-Key+IV-Key) */ #define G_ELI_MKEYLEN (G_ELI_DATAIVKEYLEN + SHA512_MDLEN) #define G_ELI_OVERWRITES 5 /* Switch data encryption key every 2^20 blocks. */ #define G_ELI_KEY_SHIFT 20 #define G_ELI_CRYPTO_UNKNOWN 0 #define G_ELI_CRYPTO_HW 1 #define G_ELI_CRYPTO_SW 2 +#define G_ELI_CRYPTO_SW_ACCEL 3 #ifdef _KERNEL #if (MAX_KEY_BYTES < G_ELI_DATAIVKEYLEN) #error "MAX_KEY_BYTES is less than G_ELI_DATAKEYLEN" #endif extern int g_eli_debug; extern u_int g_eli_overwrites; extern u_int g_eli_batch; #define G_ELI_DEBUG(lvl, ...) \ _GEOM_DEBUG("GEOM_ELI", g_eli_debug, (lvl), NULL, __VA_ARGS__) #define G_ELI_LOGREQ(lvl, bp, ...) 
\ _GEOM_DEBUG("GEOM_ELI", g_eli_debug, (lvl), (bp), __VA_ARGS__) struct g_eli_worker { struct g_eli_softc *w_softc; struct proc *w_proc; void *w_first_key; u_int w_number; crypto_session_t w_sid; boolean_t w_active; LIST_ENTRY(g_eli_worker) w_next; }; #endif /* _KERNEL */ struct g_eli_softc { struct g_geom *sc_geom; u_int sc_version; u_int sc_crypto; uint8_t sc_mkey[G_ELI_DATAIVKEYLEN]; uint8_t sc_ekey[G_ELI_DATAKEYLEN]; TAILQ_HEAD(, g_eli_key) sc_ekeys_queue; RB_HEAD(g_eli_key_tree, g_eli_key) sc_ekeys_tree; struct mtx sc_ekeys_lock; uint64_t sc_ekeys_total; uint64_t sc_ekeys_allocated; u_int sc_ealgo; u_int sc_ekeylen; uint8_t sc_akey[G_ELI_AUTHKEYLEN]; u_int sc_aalgo; u_int sc_akeylen; u_int sc_alen; SHA256_CTX sc_akeyctx; uint8_t sc_ivkey[G_ELI_IVKEYLEN]; SHA256_CTX sc_ivctx; int sc_nkey; uint32_t sc_flags; int sc_inflight; off_t sc_mediasize; size_t sc_sectorsize; off_t sc_provsize; u_int sc_bytes_per_sector; u_int sc_data_per_sector; #ifndef _KERNEL int sc_cpubind; #else /* _KERNEL */ boolean_t sc_cpubind; /* Only for software cryptography. */ struct bio_queue_head sc_queue; struct mtx sc_queue_mtx; LIST_HEAD(, g_eli_worker) sc_workers; #endif /* _KERNEL */ }; #define sc_name sc_geom->name #define G_ELI_KEY_MAGIC 0xe11341c struct g_eli_key { /* Key value, must be first in the structure. */ uint8_t gek_key[G_ELI_DATAKEYLEN]; /* Magic. */ int gek_magic; /* Key number. */ uint64_t gek_keyno; /* Reference counter. */ int gek_count; /* Keeps keys sorted by most recent use. */ TAILQ_ENTRY(g_eli_key) gek_next; /* Keeps keys sorted by number. */ RB_ENTRY(g_eli_key) gek_link; }; struct g_eli_metadata { char md_magic[16]; /* Magic value. */ uint32_t md_version; /* Version number. */ uint32_t md_flags; /* Additional flags. */ uint16_t md_ealgo; /* Encryption algorithm. */ uint16_t md_keylen; /* Key length. */ uint16_t md_aalgo; /* Authentication algorithm. */ uint64_t md_provsize; /* Provider's size. */ uint32_t md_sectorsize; /* Sector size. */ uint8_t md_keys; /* Available keys. */ int32_t md_iterations; /* Number of iterations for PKCS#5v2. */ uint8_t md_salt[G_ELI_SALTLEN]; /* Salt. */ /* Encrypted master key (IV-key, Data-key, HMAC). */ uint8_t md_mkeys[G_ELI_MAXMKEYS * G_ELI_MKEYLEN]; u_char md_hash[16]; /* MD5 hash. 
*/ } __packed; #ifndef _OpenSSL_ static __inline void eli_metadata_encode_v0(struct g_eli_metadata *md, u_char **datap) { u_char *p; p = *datap; le32enc(p, md->md_flags); p += sizeof(md->md_flags); le16enc(p, md->md_ealgo); p += sizeof(md->md_ealgo); le16enc(p, md->md_keylen); p += sizeof(md->md_keylen); le64enc(p, md->md_provsize); p += sizeof(md->md_provsize); le32enc(p, md->md_sectorsize); p += sizeof(md->md_sectorsize); *p = md->md_keys; p += sizeof(md->md_keys); le32enc(p, md->md_iterations); p += sizeof(md->md_iterations); bcopy(md->md_salt, p, sizeof(md->md_salt)); p += sizeof(md->md_salt); bcopy(md->md_mkeys, p, sizeof(md->md_mkeys)); p += sizeof(md->md_mkeys); *datap = p; } static __inline void eli_metadata_encode_v1v2v3v4v5v6v7(struct g_eli_metadata *md, u_char **datap) { u_char *p; p = *datap; le32enc(p, md->md_flags); p += sizeof(md->md_flags); le16enc(p, md->md_ealgo); p += sizeof(md->md_ealgo); le16enc(p, md->md_keylen); p += sizeof(md->md_keylen); le16enc(p, md->md_aalgo); p += sizeof(md->md_aalgo); le64enc(p, md->md_provsize); p += sizeof(md->md_provsize); le32enc(p, md->md_sectorsize); p += sizeof(md->md_sectorsize); *p = md->md_keys; p += sizeof(md->md_keys); le32enc(p, md->md_iterations); p += sizeof(md->md_iterations); bcopy(md->md_salt, p, sizeof(md->md_salt)); p += sizeof(md->md_salt); bcopy(md->md_mkeys, p, sizeof(md->md_mkeys)); p += sizeof(md->md_mkeys); *datap = p; } static __inline void eli_metadata_encode(struct g_eli_metadata *md, u_char *data) { uint32_t hash[4]; MD5_CTX ctx; u_char *p; p = data; bcopy(md->md_magic, p, sizeof(md->md_magic)); p += sizeof(md->md_magic); le32enc(p, md->md_version); p += sizeof(md->md_version); switch (md->md_version) { case G_ELI_VERSION_00: eli_metadata_encode_v0(md, &p); break; case G_ELI_VERSION_01: case G_ELI_VERSION_02: case G_ELI_VERSION_03: case G_ELI_VERSION_04: case G_ELI_VERSION_05: case G_ELI_VERSION_06: case G_ELI_VERSION_07: eli_metadata_encode_v1v2v3v4v5v6v7(md, &p); break; default: #ifdef _KERNEL panic("%s: Unsupported version %u.", __func__, (u_int)md->md_version); #else assert(!"Unsupported metadata version."); #endif } MD5Init(&ctx); MD5Update(&ctx, data, p - data); MD5Final((void *)hash, &ctx); bcopy(hash, md->md_hash, sizeof(md->md_hash)); bcopy(md->md_hash, p, sizeof(md->md_hash)); } static __inline int eli_metadata_decode_v0(const u_char *data, struct g_eli_metadata *md) { uint32_t hash[4]; MD5_CTX ctx; const u_char *p; p = data + sizeof(md->md_magic) + sizeof(md->md_version); md->md_flags = le32dec(p); p += sizeof(md->md_flags); md->md_ealgo = le16dec(p); p += sizeof(md->md_ealgo); md->md_keylen = le16dec(p); p += sizeof(md->md_keylen); md->md_provsize = le64dec(p); p += sizeof(md->md_provsize); md->md_sectorsize = le32dec(p); p += sizeof(md->md_sectorsize); md->md_keys = *p; p += sizeof(md->md_keys); md->md_iterations = le32dec(p); p += sizeof(md->md_iterations); bcopy(p, md->md_salt, sizeof(md->md_salt)); p += sizeof(md->md_salt); bcopy(p, md->md_mkeys, sizeof(md->md_mkeys)); p += sizeof(md->md_mkeys); MD5Init(&ctx); MD5Update(&ctx, data, p - data); MD5Final((void *)hash, &ctx); bcopy(hash, md->md_hash, sizeof(md->md_hash)); if (bcmp(md->md_hash, p, 16) != 0) return (EINVAL); return (0); } static __inline int eli_metadata_decode_v1v2v3v4v5v6v7(const u_char *data, struct g_eli_metadata *md) { uint32_t hash[4]; MD5_CTX ctx; const u_char *p; p = data + sizeof(md->md_magic) + sizeof(md->md_version); md->md_flags = le32dec(p); p += sizeof(md->md_flags); md->md_ealgo = le16dec(p); p += sizeof(md->md_ealgo); 
md->md_keylen = le16dec(p); p += sizeof(md->md_keylen); md->md_aalgo = le16dec(p); p += sizeof(md->md_aalgo); md->md_provsize = le64dec(p); p += sizeof(md->md_provsize); md->md_sectorsize = le32dec(p); p += sizeof(md->md_sectorsize); md->md_keys = *p; p += sizeof(md->md_keys); md->md_iterations = le32dec(p); p += sizeof(md->md_iterations); bcopy(p, md->md_salt, sizeof(md->md_salt)); p += sizeof(md->md_salt); bcopy(p, md->md_mkeys, sizeof(md->md_mkeys)); p += sizeof(md->md_mkeys); MD5Init(&ctx); MD5Update(&ctx, data, p - data); MD5Final((void *)hash, &ctx); bcopy(hash, md->md_hash, sizeof(md->md_hash)); if (bcmp(md->md_hash, p, 16) != 0) return (EINVAL); return (0); } static __inline int eli_metadata_decode(const u_char *data, struct g_eli_metadata *md) { int error; bcopy(data, md->md_magic, sizeof(md->md_magic)); if (strcmp(md->md_magic, G_ELI_MAGIC) != 0) return (EINVAL); md->md_version = le32dec(data + sizeof(md->md_magic)); switch (md->md_version) { case G_ELI_VERSION_00: error = eli_metadata_decode_v0(data, md); break; case G_ELI_VERSION_01: case G_ELI_VERSION_02: case G_ELI_VERSION_03: case G_ELI_VERSION_04: case G_ELI_VERSION_05: case G_ELI_VERSION_06: case G_ELI_VERSION_07: error = eli_metadata_decode_v1v2v3v4v5v6v7(data, md); break; default: error = EOPNOTSUPP; break; } return (error); } #endif /* !_OpenSSL */ static __inline u_int g_eli_str2ealgo(const char *name) { if (strcasecmp("null", name) == 0) return (CRYPTO_NULL_CBC); else if (strcasecmp("null-cbc", name) == 0) return (CRYPTO_NULL_CBC); else if (strcasecmp("aes", name) == 0) return (CRYPTO_AES_XTS); else if (strcasecmp("aes-cbc", name) == 0) return (CRYPTO_AES_CBC); else if (strcasecmp("aes-xts", name) == 0) return (CRYPTO_AES_XTS); else if (strcasecmp("camellia", name) == 0) return (CRYPTO_CAMELLIA_CBC); else if (strcasecmp("camellia-cbc", name) == 0) return (CRYPTO_CAMELLIA_CBC); return (CRYPTO_ALGORITHM_MIN - 1); } static __inline u_int g_eli_str2aalgo(const char *name) { if (strcasecmp("hmac/sha1", name) == 0) return (CRYPTO_SHA1_HMAC); else if (strcasecmp("hmac/ripemd160", name) == 0) return (CRYPTO_RIPEMD160_HMAC); else if (strcasecmp("hmac/sha256", name) == 0) return (CRYPTO_SHA2_256_HMAC); else if (strcasecmp("hmac/sha384", name) == 0) return (CRYPTO_SHA2_384_HMAC); else if (strcasecmp("hmac/sha512", name) == 0) return (CRYPTO_SHA2_512_HMAC); return (CRYPTO_ALGORITHM_MIN - 1); } static __inline const char * g_eli_algo2str(u_int algo) { switch (algo) { case CRYPTO_NULL_CBC: return ("NULL"); case CRYPTO_AES_CBC: return ("AES-CBC"); case CRYPTO_AES_XTS: return ("AES-XTS"); case CRYPTO_CAMELLIA_CBC: return ("CAMELLIA-CBC"); case CRYPTO_SHA1_HMAC: return ("HMAC/SHA1"); case CRYPTO_RIPEMD160_HMAC: return ("HMAC/RIPEMD160"); case CRYPTO_SHA2_256_HMAC: return ("HMAC/SHA256"); case CRYPTO_SHA2_384_HMAC: return ("HMAC/SHA384"); case CRYPTO_SHA2_512_HMAC: return ("HMAC/SHA512"); } return ("unknown"); } static __inline void eli_metadata_dump(const struct g_eli_metadata *md) { static const char hex[] = "0123456789abcdef"; char str[sizeof(md->md_mkeys) * 2 + 1]; u_int i; printf(" magic: %s\n", md->md_magic); printf(" version: %u\n", (u_int)md->md_version); printf(" flags: 0x%x\n", (u_int)md->md_flags); printf(" ealgo: %s\n", g_eli_algo2str(md->md_ealgo)); printf(" keylen: %u\n", (u_int)md->md_keylen); if (md->md_flags & G_ELI_FLAG_AUTH) printf(" aalgo: %s\n", g_eli_algo2str(md->md_aalgo)); printf(" provsize: %ju\n", (uintmax_t)md->md_provsize); printf("sectorsize: %u\n", (u_int)md->md_sectorsize); printf(" keys: 0x%02x\n", 
(u_int)md->md_keys); printf("iterations: %d\n", (int)md->md_iterations); bzero(str, sizeof(str)); for (i = 0; i < sizeof(md->md_salt); i++) { str[i * 2] = hex[md->md_salt[i] >> 4]; str[i * 2 + 1] = hex[md->md_salt[i] & 0x0f]; } printf(" Salt: %s\n", str); bzero(str, sizeof(str)); for (i = 0; i < sizeof(md->md_mkeys); i++) { str[i * 2] = hex[md->md_mkeys[i] >> 4]; str[i * 2 + 1] = hex[md->md_mkeys[i] & 0x0f]; } printf("Master Key: %s\n", str); bzero(str, sizeof(str)); for (i = 0; i < 16; i++) { str[i * 2] = hex[md->md_hash[i] >> 4]; str[i * 2 + 1] = hex[md->md_hash[i] & 0x0f]; } printf(" MD5 hash: %s\n", str); } #ifdef _KERNEL static __inline bool eli_metadata_crypto_supported(const struct g_eli_metadata *md) { switch (md->md_ealgo) { case CRYPTO_NULL_CBC: case CRYPTO_AES_CBC: case CRYPTO_CAMELLIA_CBC: case CRYPTO_AES_XTS: break; default: return (false); } if (md->md_flags & G_ELI_FLAG_AUTH) { switch (md->md_aalgo) { case CRYPTO_SHA1_HMAC: case CRYPTO_RIPEMD160_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: break; default: return (false); } } return (true); } #endif static __inline u_int g_eli_keylen(u_int algo, u_int keylen) { switch (algo) { case CRYPTO_NULL_CBC: if (keylen == 0) keylen = 64 * 8; else { if (keylen > 64 * 8) keylen = 0; } return (keylen); case CRYPTO_AES_CBC: case CRYPTO_CAMELLIA_CBC: switch (keylen) { case 0: return (128); case 128: case 192: case 256: return (keylen); default: return (0); } case CRYPTO_AES_XTS: switch (keylen) { case 0: return (128); case 128: case 256: return (keylen); default: return (0); } default: return (0); } } static __inline u_int g_eli_ivlen(u_int algo) { switch (algo) { case CRYPTO_AES_XTS: return (AES_XTS_IV_LEN); case CRYPTO_AES_CBC: return (AES_BLOCK_LEN); case CRYPTO_CAMELLIA_CBC: return (CAMELLIA_BLOCK_LEN); } return (0); } static __inline u_int g_eli_hashlen(u_int algo) { switch (algo) { case CRYPTO_SHA1_HMAC: return (20); case CRYPTO_RIPEMD160_HMAC: return (20); case CRYPTO_SHA2_256_HMAC: return (32); case CRYPTO_SHA2_384_HMAC: return (48); case CRYPTO_SHA2_512_HMAC: return (64); } return (0); } static __inline off_t eli_mediasize(const struct g_eli_softc *sc, off_t mediasize, u_int sectorsize) { if ((sc->sc_flags & G_ELI_FLAG_ONETIME) == 0) { mediasize -= sectorsize; } if ((sc->sc_flags & G_ELI_FLAG_AUTH) == 0) { mediasize -= (mediasize % sc->sc_sectorsize); } else { mediasize /= sc->sc_bytes_per_sector; mediasize *= sc->sc_sectorsize; } return (mediasize); } static __inline void eli_metadata_softc(struct g_eli_softc *sc, const struct g_eli_metadata *md, u_int sectorsize, off_t mediasize) { sc->sc_version = md->md_version; sc->sc_inflight = 0; sc->sc_crypto = G_ELI_CRYPTO_UNKNOWN; sc->sc_flags = md->md_flags; /* Backward compatibility. 
*/ if (md->md_version < G_ELI_VERSION_04) sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER; if (md->md_version < G_ELI_VERSION_05) sc->sc_flags |= G_ELI_FLAG_SINGLE_KEY; if (md->md_version < G_ELI_VERSION_06 && (sc->sc_flags & G_ELI_FLAG_AUTH) != 0) { sc->sc_flags |= G_ELI_FLAG_FIRST_KEY; } if (md->md_version < G_ELI_VERSION_07) sc->sc_flags |= G_ELI_FLAG_ENC_IVKEY; sc->sc_ealgo = md->md_ealgo; if (sc->sc_flags & G_ELI_FLAG_AUTH) { sc->sc_akeylen = sizeof(sc->sc_akey) * 8; sc->sc_aalgo = md->md_aalgo; sc->sc_alen = g_eli_hashlen(sc->sc_aalgo); sc->sc_data_per_sector = sectorsize - sc->sc_alen; /* * Some hash functions (like SHA1 and RIPEMD160) generates hash * which length is not multiple of 128 bits, but we want data * length to be multiple of 128, so we can encrypt without * padding. The line below rounds down data length to multiple * of 128 bits. */ sc->sc_data_per_sector -= sc->sc_data_per_sector % 16; sc->sc_bytes_per_sector = (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1; sc->sc_bytes_per_sector *= sectorsize; } sc->sc_provsize = mediasize; sc->sc_sectorsize = md->md_sectorsize; sc->sc_mediasize = eli_mediasize(sc, mediasize, sectorsize); sc->sc_ekeylen = md->md_keylen; } #ifdef _KERNEL int g_eli_read_metadata(struct g_class *mp, struct g_provider *pp, struct g_eli_metadata *md); struct g_geom *g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp, const struct g_eli_metadata *md, const u_char *mkey, int nkey); int g_eli_destroy(struct g_eli_softc *sc, boolean_t force); int g_eli_access(struct g_provider *pp, int dr, int dw, int de); void g_eli_config(struct gctl_req *req, struct g_class *mp, const char *verb); void g_eli_read_done(struct bio *bp); void g_eli_write_done(struct bio *bp); int g_eli_crypto_rerun(struct cryptop *crp); void g_eli_crypto_read(struct g_eli_softc *sc, struct bio *bp, boolean_t fromworker); void g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp); void g_eli_auth_read(struct g_eli_softc *sc, struct bio *bp); void g_eli_auth_run(struct g_eli_worker *wr, struct bio *bp); #endif void g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv, size_t size); void g_eli_mkey_hmac(unsigned char *mkey, const unsigned char *key); int g_eli_mkey_decrypt(const struct g_eli_metadata *md, const unsigned char *key, unsigned char *mkey, unsigned keyp); int g_eli_mkey_decrypt_any(const struct g_eli_metadata *md, const unsigned char *key, unsigned char *mkey, unsigned *nkeyp); int g_eli_mkey_encrypt(unsigned algo, const unsigned char *key, unsigned keylen, unsigned char *mkey); #ifdef _KERNEL void g_eli_mkey_propagate(struct g_eli_softc *sc, const unsigned char *mkey); #endif int g_eli_crypto_encrypt(u_int algo, u_char *data, size_t datasize, const u_char *key, size_t keysize); int g_eli_crypto_decrypt(u_int algo, u_char *data, size_t datasize, const u_char *key, size_t keysize); struct hmac_ctx { SHA512_CTX innerctx; SHA512_CTX outerctx; }; void g_eli_crypto_hmac_init(struct hmac_ctx *ctx, const char *hkey, size_t hkeylen); void g_eli_crypto_hmac_update(struct hmac_ctx *ctx, const uint8_t *data, size_t datasize); void g_eli_crypto_hmac_final(struct hmac_ctx *ctx, uint8_t *md, size_t mdsize); void g_eli_crypto_hmac(const char *hkey, size_t hkeysize, const uint8_t *data, size_t datasize, uint8_t *md, size_t mdsize); void g_eli_key_fill(struct g_eli_softc *sc, struct g_eli_key *key, uint64_t keyno); #ifdef _KERNEL void g_eli_key_init(struct g_eli_softc *sc); void g_eli_key_destroy(struct g_eli_softc *sc); void g_eli_key_resize(struct 
g_eli_softc *sc); uint8_t *g_eli_key_hold(struct g_eli_softc *sc, off_t offset, size_t blocksize); void g_eli_key_drop(struct g_eli_softc *sc, uint8_t *rawkey); #endif #endif /* !_G_ELI_H_ */ diff --git a/sys/mips/cavium/cryptocteon/cryptocteon.c b/sys/mips/cavium/cryptocteon/cryptocteon.c index 9e6bc0c358ea..d03b35db56b4 100644 --- a/sys/mips/cavium/cryptocteon/cryptocteon.c +++ b/sys/mips/cavium/cryptocteon/cryptocteon.c @@ -1,424 +1,425 @@ /* * Octeon Crypto for OCF * * Written by David McCullough * Copyright (C) 2009 David McCullough * * LICENSE TERMS * * The free distribution and use of this software in both source and binary * form is allowed (with or without changes) provided that: * * 1. distributions of this source code include the above copyright * notice, this list of conditions and the following disclaimer; * * 2. distributions in binary form include the above copyright * notice, this list of conditions and the following disclaimer * in the documentation and/or other associated materials; * * 3. the copyright holder's name is not used to endorse products * built using this software without specific written permission. * * DISCLAIMER * * This software is provided 'as is' with no explicit or implied warranties * in respect of its properties, including, but not limited to, correctness * and/or fitness for purpose. * --------------------------------------------------------------------------- */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" struct cryptocteon_softc { int32_t sc_cid; /* opencrypto id */ }; int cryptocteon_debug = 0; TUNABLE_INT("hw.cryptocteon.debug", &cryptocteon_debug); static void cryptocteon_identify(driver_t *, device_t); static int cryptocteon_probe(device_t); static int cryptocteon_attach(device_t); static int cryptocteon_process(device_t, struct cryptop *, int); static int cryptocteon_probesession(device_t, const struct crypto_session_params *); static int cryptocteon_newsession(device_t, crypto_session_t, const struct crypto_session_params *); static void cryptocteon_identify(driver_t *drv, device_t parent) { if (octeon_has_feature(OCTEON_FEATURE_CRYPTO)) BUS_ADD_CHILD(parent, 0, "cryptocteon", 0); } static int cryptocteon_probe(device_t dev) { device_set_desc(dev, "Octeon Secure Coprocessor"); return (0); } static int cryptocteon_attach(device_t dev) { struct cryptocteon_softc *sc; sc = device_get_softc(dev); sc->sc_cid = crypto_get_driverid(dev, sizeof(struct octo_sess), - CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); + CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC | + CRYPTOCAP_F_ACCEL_SOFTWARE); if (sc->sc_cid < 0) { device_printf(dev, "crypto_get_driverid ret %d\n", sc->sc_cid); return (ENXIO); } return (0); } static bool cryptocteon_auth_supported(const struct crypto_session_params *csp) { u_int hash_len; switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: hash_len = SHA1_HASH_LEN; break; default: return (false); } if (csp->csp_auth_klen > hash_len) return (false); return (true); } static bool cryptocteon_cipher_supported(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (csp->csp_ivlen != 16) return (false); if (csp->csp_cipher_klen != 16 && csp->csp_cipher_klen != 24 && csp->csp_cipher_klen != 32) return (false); break; default: return (false); } return (true); } static int cryptocteon_probesession(device_t dev, const struct crypto_session_params *csp) { if (csp->csp_flags != 0) return (EINVAL); switch 
(csp->csp_mode) { case CSP_MODE_DIGEST: if (!cryptocteon_auth_supported(csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!cryptocteon_cipher_supported(csp)) return (EINVAL); break; case CSP_MODE_ETA: if (!cryptocteon_auth_supported(csp) || !cryptocteon_cipher_supported(csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_ACCEL_SOFTWARE); } static void cryptocteon_calc_hash(const struct crypto_session_params *csp, const char *key, struct octo_sess *ocd) { char hash_key[SHA1_HASH_LEN]; memset(hash_key, 0, sizeof(hash_key)); memcpy(hash_key, key, csp->csp_auth_klen); octo_calc_hash(csp->csp_auth_alg == CRYPTO_SHA1_HMAC, hash_key, ocd->octo_hminner, ocd->octo_hmouter); } /* Generate a new octo session. */ static int cryptocteon_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct cryptocteon_softc *sc; struct octo_sess *ocd; sc = device_get_softc(dev); ocd = crypto_get_driver_session(cses); ocd->octo_encklen = csp->csp_cipher_klen; if (csp->csp_cipher_key != NULL) memcpy(ocd->octo_enckey, csp->csp_cipher_key, ocd->octo_encklen); if (csp->csp_auth_key != NULL) cryptocteon_calc_hash(csp, csp->csp_auth_key, ocd); ocd->octo_mlen = csp->csp_auth_mlen; if (csp->csp_auth_mlen == 0) { switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: ocd->octo_mlen = SHA1_HASH_LEN; break; } } switch (csp->csp_mode) { case CSP_MODE_DIGEST: switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: ocd->octo_encrypt = octo_null_sha1_encrypt; ocd->octo_decrypt = octo_null_sha1_encrypt; break; } break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: ocd->octo_encrypt = octo_aes_cbc_encrypt; ocd->octo_decrypt = octo_aes_cbc_decrypt; break; } break; case CSP_MODE_ETA: switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: ocd->octo_encrypt = octo_aes_cbc_sha1_encrypt; ocd->octo_decrypt = octo_aes_cbc_sha1_decrypt; break; } break; } break; } KASSERT(ocd->octo_encrypt != NULL && ocd->octo_decrypt != NULL, ("%s: missing function pointers", __func__)); return (0); } /* * Process a request. */ static int cryptocteon_process(device_t dev, struct cryptop *crp, int hint) { const struct crypto_session_params *csp; struct octo_sess *od; size_t iovcnt, iovlen; struct mbuf *m = NULL; struct uio *uiop = NULL; unsigned char *ivp = NULL; unsigned char iv_data[16]; unsigned char icv[SHA1_HASH_LEN], icv2[SHA1_HASH_LEN]; int auth_off, auth_len, crypt_off, crypt_len; struct cryptocteon_softc *sc; sc = device_get_softc(dev); crp->crp_etype = 0; od = crypto_get_driver_session(crp->crp_session); csp = crypto_get_params(crp->crp_session); /* * The crypto routines assume that the regions to auth and * cipher are exactly 8 byte multiples and aligned on 8 * byte logical boundaries within the iovecs. */ if (crp->crp_aad_length % 8 != 0 || crp->crp_payload_length % 8 != 0) { crp->crp_etype = EFBIG; goto done; } /* * As currently written, the crypto routines assume the AAD and * payload are adjacent. 
*/ if (crp->crp_aad_length != 0 && crp->crp_payload_start != crp->crp_aad_start + crp->crp_aad_length) { crp->crp_etype = EFBIG; goto done; } crypt_off = crp->crp_payload_start; crypt_len = crp->crp_payload_length; if (crp->crp_aad_length != 0) { auth_off = crp->crp_aad_start; auth_len = crp->crp_aad_length + crp->crp_payload_length; } else { auth_off = crypt_off; auth_len = crypt_len; } /* * do some error checking outside of the loop for m and IOV processing * this leaves us with valid m or uiop pointers for later */ switch (crp->crp_buf.cb_type) { case CRYPTO_BUF_MBUF: { unsigned frags; m = crp->crp_buf.cb_mbuf; for (frags = 0; m != NULL; frags++) m = m->m_next; if (frags >= UIO_MAXIOV) { printf("%s,%d: %d frags > UIO_MAXIOV", __FILE__, __LINE__, frags); crp->crp_etype = EFBIG; goto done; } m = crp->crp_buf.cb_mbuf; break; } case CRYPTO_BUF_UIO: uiop = crp->crp_buf.cb_uio; if (uiop->uio_iovcnt > UIO_MAXIOV) { printf("%s,%d: %d uio_iovcnt > UIO_MAXIOV", __FILE__, __LINE__, uiop->uio_iovcnt); crp->crp_etype = EFBIG; goto done; } break; } if (csp->csp_cipher_alg != 0) { if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) ivp = crp->crp_iv; else { crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv_data); ivp = iv_data; } } /* * setup the I/O vector to cover the buffer */ switch (crp->crp_buf.cb_type) { case CRYPTO_BUF_MBUF: iovcnt = 0; iovlen = 0; while (m != NULL) { od->octo_iov[iovcnt].iov_base = mtod(m, void *); od->octo_iov[iovcnt].iov_len = m->m_len; m = m->m_next; iovlen += od->octo_iov[iovcnt++].iov_len; } break; case CRYPTO_BUF_UIO: iovlen = 0; for (iovcnt = 0; iovcnt < uiop->uio_iovcnt; iovcnt++) { od->octo_iov[iovcnt].iov_base = uiop->uio_iov[iovcnt].iov_base; od->octo_iov[iovcnt].iov_len = uiop->uio_iov[iovcnt].iov_len; iovlen += od->octo_iov[iovcnt].iov_len; } break; case CRYPTO_BUF_CONTIG: iovlen = crp->crp_buf.cb_buf_len; od->octo_iov[0].iov_base = crp->crp_buf.cb_buf; od->octo_iov[0].iov_len = crp->crp_buf.cb_buf_len; iovcnt = 1; break; default: panic("can't happen"); } /* * setup a new explicit key */ if (crp->crp_cipher_key != NULL) memcpy(od->octo_enckey, crp->crp_cipher_key, od->octo_encklen); if (crp->crp_auth_key != NULL) cryptocteon_calc_hash(csp, crp->crp_auth_key, od); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) (*od->octo_encrypt)(od, od->octo_iov, iovcnt, iovlen, auth_off, auth_len, crypt_off, crypt_len, icv, ivp); else (*od->octo_decrypt)(od, od->octo_iov, iovcnt, iovlen, auth_off, auth_len, crypt_off, crypt_len, icv, ivp); if (csp->csp_auth_alg != 0) { if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, od->octo_mlen, icv2); if (timingsafe_bcmp(icv, icv2, od->octo_mlen) != 0) crp->crp_etype = EBADMSG; } else crypto_copyback(crp, crp->crp_digest_start, od->octo_mlen, icv); } done: crypto_done(crp); return (0); } static device_method_t cryptocteon_methods[] = { /* device methods */ DEVMETHOD(device_identify, cryptocteon_identify), DEVMETHOD(device_probe, cryptocteon_probe), DEVMETHOD(device_attach, cryptocteon_attach), /* crypto device methods */ DEVMETHOD(cryptodev_probesession, cryptocteon_probesession), DEVMETHOD(cryptodev_newsession, cryptocteon_newsession), DEVMETHOD(cryptodev_process, cryptocteon_process), { 0, 0 } }; static driver_t cryptocteon_driver = { "cryptocteon", cryptocteon_methods, sizeof (struct cryptocteon_softc), }; static devclass_t cryptocteon_devclass; DRIVER_MODULE(cryptocteon, nexus, cryptocteon_driver, cryptocteon_devclass, 0, 0); diff --git a/sys/opencrypto/cryptodev.h b/sys/opencrypto/cryptodev.h index 
d51df5035731..836cb3b38acd 100644 --- a/sys/opencrypto/cryptodev.h +++ b/sys/opencrypto/cryptodev.h @@ -1,704 +1,705 @@ /* $FreeBSD$ */ /* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000 Angelos D. Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. * * Copyright (c) 2001 Theo de Raadt * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
* */ #ifndef _CRYPTO_CRYPTO_H_ #define _CRYPTO_CRYPTO_H_ #include #ifdef _KERNEL #include #include #endif /* Some initial values */ #define CRYPTO_DRIVERS_INITIAL 4 /* Hash values */ #define NULL_HASH_LEN 16 #define SHA1_HASH_LEN 20 #define RIPEMD160_HASH_LEN 20 #define SHA2_224_HASH_LEN 28 #define SHA2_256_HASH_LEN 32 #define SHA2_384_HASH_LEN 48 #define SHA2_512_HASH_LEN 64 #define AES_GMAC_HASH_LEN 16 #define POLY1305_HASH_LEN 16 #define AES_CBC_MAC_HASH_LEN 16 /* Maximum hash algorithm result length */ #define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */ #define SHA1_BLOCK_LEN 64 #define RIPEMD160_BLOCK_LEN 64 #define SHA2_224_BLOCK_LEN 64 #define SHA2_256_BLOCK_LEN 64 #define SHA2_384_BLOCK_LEN 128 #define SHA2_512_BLOCK_LEN 128 /* HMAC values */ #define NULL_HMAC_BLOCK_LEN 64 /* Maximum HMAC block length */ #define HMAC_MAX_BLOCK_LEN SHA2_512_BLOCK_LEN /* Keep this updated */ #define HMAC_IPAD_VAL 0x36 #define HMAC_OPAD_VAL 0x5C /* HMAC Key Length */ #define AES_128_GMAC_KEY_LEN 16 #define AES_192_GMAC_KEY_LEN 24 #define AES_256_GMAC_KEY_LEN 32 #define AES_128_CBC_MAC_KEY_LEN 16 #define AES_192_CBC_MAC_KEY_LEN 24 #define AES_256_CBC_MAC_KEY_LEN 32 #define POLY1305_KEY_LEN 32 /* Encryption algorithm block sizes */ #define NULL_BLOCK_LEN 4 /* IPsec to maintain alignment */ #define RIJNDAEL128_BLOCK_LEN 16 #define AES_BLOCK_LEN 16 #define AES_ICM_BLOCK_LEN 1 #define CAMELLIA_BLOCK_LEN 16 #define CHACHA20_NATIVE_BLOCK_LEN 64 #define EALG_MAX_BLOCK_LEN CHACHA20_NATIVE_BLOCK_LEN /* Keep this updated */ /* IV Lengths */ #define AES_GCM_IV_LEN 12 #define AES_CCM_IV_LEN 12 #define AES_XTS_IV_LEN 8 #define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */ /* Min and Max Encryption Key Sizes */ #define NULL_MIN_KEY 0 #define NULL_MAX_KEY 256 /* 2048 bits, max key */ #define RIJNDAEL_MIN_KEY 16 #define RIJNDAEL_MAX_KEY 32 #define AES_MIN_KEY RIJNDAEL_MIN_KEY #define AES_MAX_KEY RIJNDAEL_MAX_KEY #define AES_XTS_MIN_KEY (2 * AES_MIN_KEY) #define AES_XTS_MAX_KEY (2 * AES_MAX_KEY) #define CAMELLIA_MIN_KEY 16 #define CAMELLIA_MAX_KEY 32 /* Maximum hash algorithm result length */ #define AALG_MAX_RESULT_LEN 64 /* Keep this updated */ #define CRYPTO_ALGORITHM_MIN 1 #define CRYPTO_DES_CBC 1 #define CRYPTO_3DES_CBC 2 #define CRYPTO_BLF_CBC 3 #define CRYPTO_CAST_CBC 4 #define CRYPTO_SKIPJACK_CBC 5 #define CRYPTO_MD5_HMAC 6 #define CRYPTO_SHA1_HMAC 7 #define CRYPTO_RIPEMD160_HMAC 8 #define CRYPTO_MD5_KPDK 9 #define CRYPTO_SHA1_KPDK 10 #define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */ #define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */ #define CRYPTO_ARC4 12 #define CRYPTO_MD5 13 #define CRYPTO_SHA1 14 #define CRYPTO_NULL_HMAC 15 #define CRYPTO_NULL_CBC 16 #define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */ #define CRYPTO_SHA2_256_HMAC 18 #define CRYPTO_SHA2_384_HMAC 19 #define CRYPTO_SHA2_512_HMAC 20 #define CRYPTO_CAMELLIA_CBC 21 #define CRYPTO_AES_XTS 22 #define CRYPTO_AES_ICM 23 /* commonly known as CTR mode */ #define CRYPTO_AES_NIST_GMAC 24 /* GMAC only */ #define CRYPTO_AES_NIST_GCM_16 25 /* 16 byte ICV */ #ifdef _KERNEL #define CRYPTO_AES_128_NIST_GMAC 26 /* auth side */ #define CRYPTO_AES_192_NIST_GMAC 27 /* auth side */ #define CRYPTO_AES_256_NIST_GMAC 28 /* auth side */ #endif #define CRYPTO_BLAKE2B 29 /* Blake2b hash */ #define CRYPTO_BLAKE2S 30 /* Blake2s hash */ #define CRYPTO_CHACHA20 31 /* Chacha20 stream cipher */ #define CRYPTO_SHA2_224_HMAC 32 #define CRYPTO_RIPEMD160 33 #define CRYPTO_SHA2_224 34 #define CRYPTO_SHA2_256 35 #define 
CRYPTO_SHA2_384 36 #define CRYPTO_SHA2_512 37 #define CRYPTO_POLY1305 38 #define CRYPTO_AES_CCM_CBC_MAC 39 /* auth side */ #define CRYPTO_AES_CCM_16 40 /* cipher side */ #define CRYPTO_ALGORITHM_MAX 40 /* Keep updated - see below */ #define CRYPTO_ALGO_VALID(x) ((x) >= CRYPTO_ALGORITHM_MIN && \ (x) <= CRYPTO_ALGORITHM_MAX) /* Algorithm flags */ #define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */ #define CRYPTO_ALG_FLAG_RNG_ENABLE 0x02 /* Has HW RNG for DH/DSA */ #define CRYPTO_ALG_FLAG_DSA_SHA 0x04 /* Can do SHA on msg */ /* * Crypto driver/device flags. They can set in the crid * parameter when creating a session or submitting a key * op to affect the device/driver assigned. If neither * of these are specified then the crid is assumed to hold * the driver id of an existing (and suitable) device that * must be used to satisfy the request. */ #define CRYPTO_FLAG_HARDWARE 0x01000000 /* hardware accelerated */ #define CRYPTO_FLAG_SOFTWARE 0x02000000 /* software implementation */ /* NB: deprecated */ struct session_op { u_int32_t cipher; /* ie. CRYPTO_AES_CBC */ u_int32_t mac; /* ie. CRYPTO_SHA2_256_HMAC */ u_int32_t keylen; /* cipher key */ c_caddr_t key; int mackeylen; /* mac key */ c_caddr_t mackey; u_int32_t ses; /* returns: session # */ }; /* * session and crypt _op structs are used by userspace programs to interact * with /dev/crypto. Confusingly, the internal kernel interface is named * "cryptop" (no underscore). */ struct session2_op { u_int32_t cipher; /* ie. CRYPTO_AES_CBC */ u_int32_t mac; /* ie. CRYPTO_SHA2_256_HMAC */ u_int32_t keylen; /* cipher key */ c_caddr_t key; int mackeylen; /* mac key */ c_caddr_t mackey; u_int32_t ses; /* returns: session # */ int crid; /* driver id + flags (rw) */ int pad[4]; /* for future expansion */ }; struct crypt_op { u_int32_t ses; u_int16_t op; /* i.e. COP_ENCRYPT */ #define COP_ENCRYPT 1 #define COP_DECRYPT 2 u_int16_t flags; #define COP_F_CIPHER_FIRST 0x0001 /* Cipher before MAC. */ #define COP_F_BATCH 0x0008 /* Batch op if possible */ u_int len; c_caddr_t src; /* become iov[] inside kernel */ caddr_t dst; caddr_t mac; /* must be big enough for chosen MAC */ c_caddr_t iv; }; /* op and flags the same as crypt_op */ struct crypt_aead { u_int32_t ses; u_int16_t op; /* i.e. COP_ENCRYPT */ u_int16_t flags; u_int len; u_int aadlen; u_int ivlen; c_caddr_t src; /* become iov[] inside kernel */ caddr_t dst; c_caddr_t aad; /* additional authenticated data */ caddr_t tag; /* must fit for chosen TAG length */ c_caddr_t iv; }; /* * Parameters for looking up a crypto driver/device by * device name or by id. The latter are returned for * created sessions (crid) and completed key operations. */ struct crypt_find_op { int crid; /* driver id + flags */ char name[32]; /* device/driver name */ }; /* bignum parameter, in packed bytes, ... */ struct crparam { caddr_t crp_p; u_int crp_nbits; }; #define CRK_MAXPARAM 8 struct crypt_kop { u_int crk_op; /* ie. 
CRK_MOD_EXP or other */ u_int crk_status; /* return status */ u_short crk_iparams; /* # of input parameters */ u_short crk_oparams; /* # of output parameters */ u_int crk_crid; /* NB: only used by CIOCKEY2 (rw) */ struct crparam crk_param[CRK_MAXPARAM]; }; #define CRK_ALGORITM_MIN 0 #define CRK_MOD_EXP 0 #define CRK_MOD_EXP_CRT 1 #define CRK_DSA_SIGN 2 #define CRK_DSA_VERIFY 3 #define CRK_DH_COMPUTE_KEY 4 #define CRK_ALGORITHM_MAX 4 /* Keep updated - see below */ #define CRF_MOD_EXP (1 << CRK_MOD_EXP) #define CRF_MOD_EXP_CRT (1 << CRK_MOD_EXP_CRT) #define CRF_DSA_SIGN (1 << CRK_DSA_SIGN) #define CRF_DSA_VERIFY (1 << CRK_DSA_VERIFY) #define CRF_DH_COMPUTE_KEY (1 << CRK_DH_COMPUTE_KEY) /* * done against open of /dev/crypto, to get a cloned descriptor. * Please use F_SETFD against the cloned descriptor. */ #define CRIOGET _IOWR('c', 100, u_int32_t) #define CRIOASYMFEAT CIOCASYMFEAT #define CRIOFINDDEV CIOCFINDDEV /* the following are done against the cloned descriptor */ #define CIOCGSESSION _IOWR('c', 101, struct session_op) #define CIOCFSESSION _IOW('c', 102, u_int32_t) #define CIOCCRYPT _IOWR('c', 103, struct crypt_op) #define CIOCKEY _IOWR('c', 104, struct crypt_kop) #define CIOCASYMFEAT _IOR('c', 105, u_int32_t) #define CIOCGSESSION2 _IOWR('c', 106, struct session2_op) #define CIOCKEY2 _IOWR('c', 107, struct crypt_kop) #define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op) #define CIOCCRYPTAEAD _IOWR('c', 109, struct crypt_aead) struct cryptotstat { struct timespec acc; /* total accumulated time */ struct timespec min; /* min time */ struct timespec max; /* max time */ u_int32_t count; /* number of observations */ }; struct cryptostats { u_int32_t cs_ops; /* symmetric crypto ops submitted */ u_int32_t cs_errs; /* symmetric crypto ops that failed */ u_int32_t cs_kops; /* asymetric/key ops submitted */ u_int32_t cs_kerrs; /* asymetric/key ops that failed */ u_int32_t cs_intrs; /* crypto swi thread activations */ u_int32_t cs_rets; /* crypto return thread activations */ u_int32_t cs_blocks; /* symmetric op driver block */ u_int32_t cs_kblocks; /* symmetric op driver block */ /* * When CRYPTO_TIMING is defined at compile time and the * sysctl debug.crypto is set to 1, the crypto system will * accumulate statistics about how long it takes to process * crypto requests at various points during processing. */ struct cryptotstat cs_invoke; /* crypto_dipsatch -> crypto_invoke */ struct cryptotstat cs_done; /* crypto_invoke -> crypto_done */ struct cryptotstat cs_cb; /* crypto_done -> callback */ struct cryptotstat cs_finis; /* callback -> callback return */ }; #ifdef _KERNEL /* * Return values for cryptodev_probesession methods. */ #define CRYPTODEV_PROBE_HARDWARE (-100) #define CRYPTODEV_PROBE_ACCEL_SOFTWARE (-200) #define CRYPTODEV_PROBE_SOFTWARE (-500) #if 0 #define CRYPTDEB(s, ...) do { \ printf("%s:%d: " s "\n", __FILE__, __LINE__, ## __VA_ARGS__); \ } while (0) #else #define CRYPTDEB(...) do { } while (0) #endif struct crypto_session_params { int csp_mode; /* Type of operations to perform. */ #define CSP_MODE_NONE 0 #define CSP_MODE_COMPRESS 1 /* Compression/decompression. */ #define CSP_MODE_CIPHER 2 /* Encrypt/decrypt. */ #define CSP_MODE_DIGEST 3 /* Compute/verify digest. */ #define CSP_MODE_AEAD 4 /* Combined auth/encryption. */ #define CSP_MODE_ETA 5 /* IPsec style encrypt-then-auth */ int csp_flags; #define CSP_F_SEPARATE_OUTPUT 0x0001 /* Requests can use separate output */ int csp_ivlen; /* IV length in bytes. */ int csp_cipher_alg; int csp_cipher_klen; /* Key length in bytes. 
*/ const void *csp_cipher_key; int csp_auth_alg; int csp_auth_klen; /* Key length in bytes. */ const void *csp_auth_key; int csp_auth_mlen; /* Number of digest bytes to use. 0 means all. */ }; enum crypto_buffer_type { CRYPTO_BUF_NONE = 0, CRYPTO_BUF_CONTIG, CRYPTO_BUF_UIO, CRYPTO_BUF_MBUF, CRYPTO_BUF_LAST = CRYPTO_BUF_MBUF }; /* * Description of a data buffer for a request. Requests can either * have a single buffer that is modified in place or separate input * and output buffers. */ struct crypto_buffer { union { struct { char *cb_buf; int cb_buf_len; }; struct mbuf *cb_mbuf; struct uio *cb_uio; }; enum crypto_buffer_type cb_type; }; /* * A cursor is used to iterate through a crypto request data buffer. */ struct crypto_buffer_cursor { union { char *cc_buf; struct mbuf *cc_mbuf; struct iovec *cc_iov; }; union { int cc_buf_len; size_t cc_offset; }; enum crypto_buffer_type cc_type; }; /* Structure describing complete operation */ struct cryptop { TAILQ_ENTRY(cryptop) crp_next; struct task crp_task; crypto_session_t crp_session; /* Session */ int crp_olen; /* Result total length */ int crp_etype; /* * Error type (zero means no error). * All error codes except EAGAIN * indicate possible data corruption (as in, * the data have been touched). On all * errors, the crp_session may have changed * (reset to a new one), so the caller * should always check and use the new * value on future requests. */ int crp_flags; #define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */ #define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */ #define CRYPTO_F_DONE 0x0020 /* Operation completed */ #define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */ #define CRYPTO_F_ASYNC 0x0080 /* Dispatch crypto jobs on several threads * if op is synchronous */ #define CRYPTO_F_ASYNC_KEEPORDER 0x0100 /* * Dispatch the crypto jobs in the same * order there are submitted. Applied only * if CRYPTO_F_ASYNC flags is set */ #define CRYPTO_F_IV_SEPARATE 0x0200 /* Use crp_iv[] as IV. */ int crp_op; struct crypto_buffer crp_buf; struct crypto_buffer crp_obuf; int crp_aad_start; /* Location of AAD. */ int crp_aad_length; /* 0 => no AAD. */ int crp_iv_start; /* Location of IV. IV length is from * the session. */ int crp_payload_start; /* Location of ciphertext. */ int crp_payload_output_start; int crp_payload_length; int crp_digest_start; /* Location of MAC/tag. Length is * from the session. */ uint8_t crp_iv[EALG_MAX_BLOCK_LEN]; /* IV if IV_SEPARATE. */ const void *crp_cipher_key; /* New cipher key if non-NULL. */ const void *crp_auth_key; /* New auth key if non-NULL. 
*/ void *crp_opaque; /* Opaque pointer, passed along */ int (*crp_callback)(struct cryptop *); /* Callback function */ struct bintime crp_tstamp; /* performance time stamp */ uint32_t crp_seq; /* used for ordered dispatch */ uint32_t crp_retw_id; /* * the return worker to be used, * used for ordered dispatch */ }; static __inline void _crypto_use_buf(struct crypto_buffer *cb, void *buf, int len) { cb->cb_buf = buf; cb->cb_buf_len = len; cb->cb_type = CRYPTO_BUF_CONTIG; } static __inline void _crypto_use_mbuf(struct crypto_buffer *cb, struct mbuf *m) { cb->cb_mbuf = m; cb->cb_type = CRYPTO_BUF_MBUF; } static __inline void _crypto_use_uio(struct crypto_buffer *cb, struct uio *uio) { cb->cb_uio = uio; cb->cb_type = CRYPTO_BUF_UIO; } static __inline void crypto_use_buf(struct cryptop *crp, void *buf, int len) { _crypto_use_buf(&crp->crp_buf, buf, len); } static __inline void crypto_use_mbuf(struct cryptop *crp, struct mbuf *m) { _crypto_use_mbuf(&crp->crp_buf, m); } static __inline void crypto_use_uio(struct cryptop *crp, struct uio *uio) { _crypto_use_uio(&crp->crp_buf, uio); } static __inline void crypto_use_output_buf(struct cryptop *crp, void *buf, int len) { _crypto_use_buf(&crp->crp_obuf, buf, len); } static __inline void crypto_use_output_mbuf(struct cryptop *crp, struct mbuf *m) { _crypto_use_mbuf(&crp->crp_obuf, m); } static __inline void crypto_use_output_uio(struct cryptop *crp, struct uio *uio) { _crypto_use_uio(&crp->crp_obuf, uio); } #define CRYPTOP_ASYNC(crp) \ (((crp)->crp_flags & CRYPTO_F_ASYNC) && \ crypto_ses2caps((crp)->crp_session) & CRYPTOCAP_F_SYNC) #define CRYPTOP_ASYNC_KEEPORDER(crp) \ (CRYPTOP_ASYNC(crp) && \ (crp)->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) #define CRYPTO_HAS_OUTPUT_BUFFER(crp) \ ((crp)->crp_obuf.cb_type != CRYPTO_BUF_NONE) /* Flags in crp_op. */ #define CRYPTO_OP_DECRYPT 0x0 #define CRYPTO_OP_ENCRYPT 0x1 #define CRYPTO_OP_IS_ENCRYPT(op) ((op) & CRYPTO_OP_ENCRYPT) #define CRYPTO_OP_COMPUTE_DIGEST 0x0 #define CRYPTO_OP_VERIFY_DIGEST 0x2 #define CRYPTO_OP_DECOMPRESS CRYPTO_OP_DECRYPT #define CRYPTO_OP_COMPRESS CRYPTO_OP_ENCRYPT #define CRYPTO_OP_IS_COMPRESS(op) ((op) & CRYPTO_OP_COMPRESS) /* * Hints passed to process methods. */ #define CRYPTO_HINT_MORE 0x1 /* more ops coming shortly */ struct cryptkop { TAILQ_ENTRY(cryptkop) krp_next; u_int krp_op; /* ie. CRK_MOD_EXP or other */ u_int krp_status; /* return status */ u_short krp_iparams; /* # of input parameters */ u_short krp_oparams; /* # of output parameters */ u_int krp_crid; /* desired device, etc. 
 */
	uint32_t	krp_hid;	/* device used */
	struct crparam	krp_param[CRK_MAXPARAM];	/* kvm */
	void		(*krp_callback)(struct cryptkop *);
	struct cryptocap *krp_cap;
};

uint32_t crypto_ses2hid(crypto_session_t crypto_session);
uint32_t crypto_ses2caps(crypto_session_t crypto_session);
void *crypto_get_driver_session(crypto_session_t crypto_session);
const struct crypto_session_params *crypto_get_params(
    crypto_session_t crypto_session);
struct auth_hash *crypto_auth_hash(const struct crypto_session_params *csp);
struct enc_xform *crypto_cipher(const struct crypto_session_params *csp);

MALLOC_DECLARE(M_CRYPTO_DATA);

extern	int crypto_newsession(crypto_session_t *cses,
    const struct crypto_session_params *params, int hard);
extern	void crypto_freesession(crypto_session_t cses);
#define CRYPTOCAP_F_HARDWARE	CRYPTO_FLAG_HARDWARE
#define CRYPTOCAP_F_SOFTWARE	CRYPTO_FLAG_SOFTWARE
#define CRYPTOCAP_F_SYNC	0x04000000	/* operates synchronously */
+#define CRYPTOCAP_F_ACCEL_SOFTWARE 0x08000000
extern	int32_t crypto_get_driverid(device_t dev, size_t session_size,
    int flags);
extern	int crypto_find_driver(const char *);
extern	device_t crypto_find_device_byhid(int hid);
extern	int crypto_getcaps(int hid);
extern	int crypto_kregister(u_int32_t, int, u_int32_t);
extern	int crypto_unregister_all(u_int32_t driverid);
extern	int crypto_dispatch(struct cryptop *crp);
extern	int crypto_kdispatch(struct cryptkop *);
#define CRYPTO_SYMQ	0x1
#define CRYPTO_ASYMQ	0x2
extern	int crypto_unblock(u_int32_t, int);
extern	void crypto_done(struct cryptop *crp);
extern	void crypto_kdone(struct cryptkop *);
extern	int crypto_getfeat(int *);

extern	void crypto_freereq(struct cryptop *crp);
extern	struct cryptop *crypto_getreq(crypto_session_t cses, int how);

extern	int crypto_usercrypto;		/* userland may do crypto requests */
extern	int crypto_userasymcrypto;	/* userland may do asym crypto reqs */
extern	int crypto_devallowsoft;	/* only use hardware crypto */

#ifdef SYSCTL_DECL
SYSCTL_DECL(_kern_crypto);
#endif

/* Helper routines for drivers to initialize auth contexts for HMAC. */
struct auth_hash;

void	hmac_init_ipad(struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx);
void	hmac_init_opad(struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx);

/*
 * Crypto-related utility routines used mainly by drivers.
 *
 * Similar to m_copyback/data, *_copyback copies data from the 'src'
 * buffer into the crypto request's data buffer, while *_copydata copies
 * data from the crypto request's data buffer into the 'dst' buffer.
 */
void	crypto_copyback(struct cryptop *crp, int off, int size,
    const void *src);
void	crypto_copydata(struct cryptop *crp, int off, int size, void *dst);
int	crypto_apply(struct cryptop *crp, int off, int len,
    int (*f)(void *, void *, u_int), void *arg);
void	*crypto_contiguous_subsegment(struct cryptop *crp, size_t skip,
    size_t len);
int	crypto_apply_buf(struct crypto_buffer *cb, int off, int len,
    int (*f)(void *, void *, u_int), void *arg);
void	*crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb,
    size_t skip, size_t len);
size_t	crypto_buffer_len(struct crypto_buffer *cb);
void	crypto_cursor_init(struct crypto_buffer_cursor *cc,
    const struct crypto_buffer *cb);
void	crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount);
void	*crypto_cursor_segbase(struct crypto_buffer_cursor *cc);
size_t	crypto_cursor_seglen(struct crypto_buffer_cursor *cc);
void	crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size,
    const void *vsrc);
void	crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size,
    void *vdst);
void	crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size,
    void *vdst);

static __inline void
crypto_read_iv(struct cryptop *crp, void *iv)
{
	const struct crypto_session_params *csp;

	csp = crypto_get_params(crp->crp_session);
	if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
		memcpy(iv, crp->crp_iv, csp->csp_ivlen);
	else
		crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv);
}

#endif /* _KERNEL */
#endif /* _CRYPTO_CRYPTO_H_ */
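For reference, the fragment below is a minimal sketch (not part of this change) of how a driver built on accelerated CPU instructions might use the new CRYPTOCAP_F_ACCEL_SOFTWARE capability together with the existing CRYPTODEV_PROBE_ACCEL_SOFTWARE probe value, following the same registration pattern as cryptocteon above.  The "examplecrypto" device, its softc, and its session structure are hypothetical and purely illustrative.

#include <sys/param.h>
#include <sys/systm.h>
#include <opencrypto/cryptodev.h>
#include "cryptodev_if.h"

struct examplecrypto_softc {
	int32_t	sc_cid;			/* opencrypto driver id */
};

struct examplecrypto_session {
	int	es_klen;		/* per-session state (illustrative) */
};

static int
examplecrypto_attach(device_t dev)
{
	struct examplecrypto_softc *sc;

	sc = device_get_softc(dev);
	/*
	 * A software driver that completes requests synchronously using
	 * accelerated CPU instructions sets all three flags.
	 */
	sc->sc_cid = crypto_get_driverid(dev,
	    sizeof(struct examplecrypto_session),
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC |
	    CRYPTOCAP_F_ACCEL_SOFTWARE);
	if (sc->sc_cid < 0)
		return (ENXIO);
	return (0);
}

static int
examplecrypto_probesession(device_t dev,
    const struct crypto_session_params *csp)
{
	if (csp->csp_flags != 0)
		return (EINVAL);
	if (csp->csp_mode != CSP_MODE_CIPHER ||
	    csp->csp_cipher_alg != CRYPTO_AES_CBC)
		return (EINVAL);
	/* Preferred over plain software, but below true hardware offload. */
	return (CRYPTODEV_PROBE_ACCEL_SOFTWARE);
}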