Index: head/share/man/man4/crypto.4 =================================================================== --- head/share/man/man4/crypto.4 (revision 366843) +++ head/share/man/man4/crypto.4 (revision 366844) @@ -1,461 +1,466 @@ .\" $NetBSD: crypto.4,v 1.24 2014/01/27 21:23:59 pgoyette Exp $ .\" .\" Copyright (c) 2008 The NetBSD Foundation, Inc. .\" Copyright (c) 2014 The FreeBSD Foundation .\" All rights reserved. .\" .\" Portions of this documentation were written by John-Mark Gurney .\" under sponsorship of the FreeBSD Foundation and .\" Rubicon Communications, LLC (Netgate). .\" .\" This code is derived from software contributed to The NetBSD Foundation .\" by Coyote Point Systems, Inc. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS .\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED .\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR .\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS .\" BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" .\" .\" .\" Copyright (c) 2004 .\" Jonathan Stone . All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY Jonathan Stone AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL Jonathan Stone OR THE VOICES IN HIS HEAD .\" BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF .\" THE POSSIBILITY OF SUCH DAMAGE. 
.\" .\" $FreeBSD$ .\" -.Dd May 11, 2020 +.Dd October 19, 2020 .Dt CRYPTO 4 .Os .Sh NAME .Nm crypto , .Nm cryptodev .Nd user-mode access to hardware-accelerated cryptography .Sh SYNOPSIS .Cd device crypto .Cd device cryptodev .Pp .In sys/ioctl.h .In sys/time.h .In crypto/cryptodev.h .Sh DESCRIPTION The .Nm driver gives user-mode applications access to hardware-accelerated cryptographic transforms as implemented by the .Xr crypto 9 in-kernel interface. .Pp The .Pa /dev/crypto special device provides an .Xr ioctl 2 based interface. User-mode applications open the special device and then issue .Xr ioctl 2 calls on the descriptor. User-mode access to .Pa /dev/crypto is controlled by two .Xr sysctl 8 variables: .Ic kern.userasymcrypto and .Ic kern.cryptodevallowsoft . .Pp The .Nm device provides two distinct modes of operation: one mode for symmetric-keyed cryptographic requests and digests, and a second mode for both asymmetric-key (public-key/private-key) requests and modular arithmetic (for Diffie-Hellman key exchange and other cryptographic protocols). The two modes are described separately below. +.Sh DEPRECATION NOTICE +The asymmetric-key operations supported by this interface will not be +present in +.Fx 14.0 +and later. .Sh THEORY OF OPERATION Regardless of whether symmetric-key or asymmetric-key operations are to be performed, use of the device requires a basic series of steps: .Bl -enum .It Open the .Pa /dev/crypto device. .It Create a new cryptography file descriptor via .Dv CRIOGET to use for all subsequent .Xr ioctl 2 commands. .It Close the .Pa /dev/crypto device. .It If any symmetric-keyed cryptographic or digest operations will be performed, create a session with .Dv CIOCGSESSION . Most applications will require at least one symmetric session. Since cipher and MAC keys are tied to sessions, many applications will require more. Asymmetric operations do not use sessions. .It Submit requests, synchronously with .Dv CIOCCRYPT (symmetric), .Dv CIOCCRYPTAEAD (symmetric), or .Dv CIOCKEY (asymmetric). .It Optionally destroy a session with .Dv CIOCFSESSION . .It Close the cryptography file descriptor with .Xr close 2 . This will automatically close any remaining sessions associated with the file desriptor. .El .Sh SYMMETRIC-KEY OPERATION The symmetric-key operation mode provides a context-based API to traditional symmetric-key encryption (or privacy) algorithms, or to keyed and unkeyed one-way hash (HMAC and MAC) algorithms. The symmetric-key mode also permits encrypt-then-authenticate fused operation, where the hardware performs both a privacy algorithm and an integrity-check algorithm in a single pass over the data: either a fused encrypt/HMAC-generate operation, or a fused HMAC-verify/decrypt operation. .Pp To use symmetric mode, you must first create a session specifying the algorithm(s) and key(s) to use; then issue encrypt or decrypt requests against the session. .Ss Algorithms For a list of supported algorithms, see .Xr crypto 7 and .Xr crypto 9 . .Ss IOCTL Request Descriptions .\" .Bl -tag -width CIOCGSESSION .\" .It Dv CRIOGET Fa int *fd Clone the fd argument to .Xr ioctl 2 , yielding a new file descriptor for the creation of sessions. .\" .It Dv CIOCFINDDEV Fa struct crypt_find_op *fop .Bd -literal struct crypt_find_op { int crid; /* driver id + flags */ char name[32]; /* device/driver name */ }; .Ed If .Fa crid is -1, then find the driver named .Fa name and return the id in .Fa crid . If .Fa crid is not -1, return the name of the driver with .Fa crid in .Fa name . 
.It Dv CIOCGSESSION Fa struct session_op *sessp
.Bd -literal
struct session_op {
    u_int32_t   cipher;     /* e.g. CRYPTO_DES_CBC */
    u_int32_t   mac;        /* e.g. CRYPTO_MD5_HMAC */
    u_int32_t   keylen;     /* cipher key */
    const void *key;
    int         mackeylen;  /* mac key */
    const void *mackey;
    u_int32_t   ses;        /* returns: ses # */
};
.Ed
Create a new cryptographic session on a file descriptor for the device;
that is, a persistent object specific to the chosen privacy algorithm,
integrity algorithm, and keys specified in
.Fa sessp .
The special value 0 for either privacy or integrity is reserved to
indicate that the indicated operation (privacy or integrity) is not
desired for this session.
.Pp
Multiple sessions may be bound to a single file descriptor.
The session ID returned in
.Fa sessp-\*[Gt]ses
is supplied as a required field in the symmetric-operation structure
.Fa crypt_op
for future encryption or hashing requests.
.\" .Pp
.\" This implementation will never return a session ID of 0 for a successful
.\" creation of a session, which is a
.\" .Nx
.\" extension.
.Pp
For non-zero symmetric-key privacy algorithms, the privacy algorithm
must be specified in
.Fa sessp-\*[Gt]cipher ,
the key length in
.Fa sessp-\*[Gt]keylen ,
and the key value in the octets addressed by
.Fa sessp-\*[Gt]key .
.Pp
For keyed one-way hash algorithms, the one-way hash must be specified in
.Fa sessp-\*[Gt]mac ,
the key length in
.Fa sessp-\*[Gt]mackeylen ,
and the key value in the octets addressed by
.Fa sessp-\*[Gt]mackey .
.\"
.Pp
Support for a specific combination of fused privacy and integrity-check
algorithms depends on whether the underlying hardware supports that
combination.
Not all combinations are supported by all hardware, even if the hardware
supports each operation as a stand-alone non-fused operation.
.It Dv CIOCGSESSION2 Fa struct session2_op *sessp
.Bd -literal
struct session2_op {
    u_int32_t   cipher;     /* e.g. CRYPTO_DES_CBC */
    u_int32_t   mac;        /* e.g. CRYPTO_MD5_HMAC */
    u_int32_t   keylen;     /* cipher key */
    const void *key;
    int         mackeylen;  /* mac key */
    const void *mackey;
    u_int32_t   ses;        /* returns: ses # */
    int         crid;       /* driver id + flags (rw) */
    int         pad[4];     /* for future expansion */
};
.Ed
This request is similar to
.Dv CIOCGSESSION
except that
.Fa sessp-\*[Gt]crid
requests either a specific crypto device or a class of devices (software
vs hardware).
The
.Fa sessp-\*[Gt]pad
field must be initialized to zero.
.It Dv CIOCCRYPT Fa struct crypt_op *cr_op
.Bd -literal
struct crypt_op {
    u_int32_t   ses;
    u_int16_t   op;         /* e.g. COP_ENCRYPT */
    u_int16_t   flags;
    u_int       len;
    caddr_t     src, dst;
    caddr_t     mac;        /* must be large enough for result */
    caddr_t     iv;
};
.Ed
Request a symmetric-key (or hash) operation.
To encrypt, set
.Fa cr_op-\*[Gt]op
to
.Dv COP_ENCRYPT .
To decrypt, set
.Fa cr_op-\*[Gt]op
to
.Dv COP_DECRYPT .
The field
.Fa cr_op-\*[Gt]len
supplies the length of the input buffer; the fields
.Fa cr_op-\*[Gt]src ,
.Fa cr_op-\*[Gt]dst ,
.Fa cr_op-\*[Gt]mac ,
and
.Fa cr_op-\*[Gt]iv
supply the addresses of the input buffer, output buffer, one-way hash,
and initialization vector, respectively.
.Pp
If a session is using either fused encrypt-then-authenticate or an AEAD
algorithm, decryption operations require the associated hash as an input.
If the hash is incorrect, the operation will fail with
.Dv EBADMSG
and the output buffer will remain unchanged.
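.Pp
As an illustrative sketch (error checking omitted; the key, IV, and
buffer contents are placeholders), a privacy-only AES-CBC session might
be created and used as follows:
.Bd -literal
struct session_op sess;
struct crypt_op cop;
unsigned char key[16], iv[16], plain[64], cipher[64];

memset(&sess, 0, sizeof(sess));
sess.cipher = CRYPTO_AES_CBC;     /* privacy only, no MAC */
sess.keylen = sizeof(key);
sess.key = key;
ioctl(fd, CIOCGSESSION, &sess);   /* fd obtained via CRIOGET, above */

memset(&cop, 0, sizeof(cop));
cop.ses = sess.ses;
cop.op = COP_ENCRYPT;
cop.len = sizeof(plain);          /* must be a cipher-block multiple */
cop.src = (caddr_t)plain;
cop.dst = (caddr_t)cipher;
cop.iv = (caddr_t)iv;
ioctl(fd, CIOCCRYPT, &cop);

ioctl(fd, CIOCFSESSION, &sess.ses);
.Ed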
.It Dv CIOCCRYPTAEAD Fa struct crypt_aead *cr_aead
.Bd -literal
struct crypt_aead {
    u_int32_t   ses;
    u_int16_t   op;         /* e.g. COP_ENCRYPT */
    u_int16_t   flags;
    u_int       len;
    u_int       aadlen;
    u_int       ivlen;
    caddr_t     src, dst;
    caddr_t     aad;
    caddr_t     tag;        /* must be large enough for result */
    caddr_t     iv;
};
.Ed
The
.Dv CIOCCRYPTAEAD
request is similar to
.Dv CIOCCRYPT
but provides additional data in
.Fa cr_aead-\*[Gt]aad
to include in the authentication mode.
.It Dv CIOCFSESSION Fa u_int32_t ses_id
Destroys the session identified by
.Fa ses_id .
.El
.\"
.Sh ASYMMETRIC-KEY OPERATION
.Ss Asymmetric-key algorithms
Contingent upon hardware support, the following asymmetric
(public-key/private-key; or key-exchange subroutine) operations may also
be available:
.Pp
.Bl -column "CRK_DH_COMPUTE_KEY" "Input parameter" "Output parameter" -offset indent -compact
.It Em "Algorithm" Ta "Input parameter" Ta "Output parameter"
.It Em " " Ta "Count" Ta "Count"
.It Dv CRK_MOD_EXP Ta 3 Ta 1
.It Dv CRK_MOD_EXP_CRT Ta 6 Ta 1
.It Dv CRK_DSA_SIGN Ta 5 Ta 2
.It Dv CRK_DSA_VERIFY Ta 7 Ta 0
.It Dv CRK_DH_COMPUTE_KEY Ta 3 Ta 1
.El
.Pp
See below for discussion of the input and output parameter counts.
.Ss Asymmetric-key commands
.Bl -tag -width CIOCKEY
.It Dv CIOCASYMFEAT Fa int *feature_mask
Returns a bitmask of supported asymmetric-key operations.
Each of the above-listed asymmetric operations is present
if and only if the bit position numbered by the code for that operation
is set.
For example,
.Dv CRK_MOD_EXP
is available if and only if the bit
.Pq 1 \*[Lt]\*[Lt] Dv CRK_MOD_EXP
is set.
.It Dv CIOCKEY Fa struct crypt_kop *kop
.Bd -literal
struct crypt_kop {
    u_int   crk_op;         /* e.g. CRK_MOD_EXP */
    u_int   crk_status;     /* return status */
    u_short crk_iparams;    /* # of input params */
    u_short crk_oparams;    /* # of output params */
    u_int   crk_pad1;
    struct crparam crk_param[CRK_MAXPARAM];
};

/* Bignum parameter, in packed bytes. */
struct crparam {
    void   *crp_p;
    u_int   crp_nbits;
};
.Ed
Performs an asymmetric-key operation from the list above.
The specific operation is supplied in
.Fa kop-\*[Gt]crk_op ;
final status for the operation is returned in
.Fa kop-\*[Gt]crk_status .
The number of input arguments and the number of output arguments are
specified in
.Fa kop-\*[Gt]crk_iparams
and
.Fa kop-\*[Gt]crk_oparams ,
respectively.
The field
.Fa crk_param[]
must be filled in with exactly
.Fa kop-\*[Gt]crk_iparams + kop-\*[Gt]crk_oparams
arguments, each encoded as a
.Fa struct crparam
(address, bitlength) pair.
.Pp
The semantics of these arguments are currently undocumented.
.El
.Sh SEE ALSO
.Xr aesni 4 ,
.Xr hifn 4 ,
.Xr ipsec 4 ,
.Xr padlock 4 ,
.Xr safe 4 ,
.Xr crypto 7 ,
.Xr geli 8 ,
.Xr crypto 9
.Sh HISTORY
The
.Nm
driver first appeared in
.Ox 3.0 .
The
.Nm
driver was imported to
.Fx 5.0 .
.Sh BUGS
Error checking and reporting are weak.
.Pp
The values specified for symmetric-key key sizes to
.Dv CIOCGSESSION
must exactly match the values expected by
.Xr opencrypto 9 .
The output buffer and MAC buffers supplied to
.Dv CIOCCRYPT
must follow whether privacy or integrity algorithms were specified for
the session: if you request a
.No non- Ns Dv NULL
algorithm, you must supply a suitably-sized buffer.
.Pp
The scheme for passing arguments for asymmetric requests is baroque.
.Pp
.Dv CRIOGET
should not exist.
It should be possible to use the
.Dv CIOC Ns \&*
commands directly on a
.Pa /dev/crypto
file descriptor.
Index: head/sys/opencrypto/crypto.c
===================================================================
--- head/sys/opencrypto/crypto.c (revision 366843)
+++ head/sys/opencrypto/crypto.c (revision 366844)
@@ -1,2282 +1,2283 @@
/*-
 * Copyright (c) 2002-2006 Sam Leffler.
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Cryptographic Subsystem. * * This code is derived from the Openbsd Cryptographic Framework (OCF) * that has the copyright shown below. Very little of the original * code remains. */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include "opt_compat.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) #include #endif SDT_PROVIDER_DEFINE(opencrypto); /* * Crypto drivers register themselves by allocating a slot in the * crypto_drivers table with crypto_get_driverid() and then registering * each asym algorithm they support with crypto_kregister(). */ static struct mtx crypto_drivers_mtx; /* lock on driver table */ #define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_ASSERT() mtx_assert(&crypto_drivers_mtx, MA_OWNED) /* * Crypto device/driver capabilities structure. * * Synchronization: * (d) - protected by CRYPTO_DRIVER_LOCK() * (q) - protected by CRYPTO_Q_LOCK() * Not tagged fields are read-only. 
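 * cc_refs counts the references held by active sessions and in-flight
 * asymmetric operations; the structure itself is freed from cap_rele()
 * once the final reference is dropped.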
*/ struct cryptocap { device_t cc_dev; uint32_t cc_hid; u_int32_t cc_sessions; /* (d) # of sessions */ u_int32_t cc_koperations; /* (d) # os asym operations */ u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1]; int cc_flags; /* (d) flags */ #define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */ int cc_qblocked; /* (q) symmetric q blocked */ int cc_kqblocked; /* (q) asymmetric q blocked */ size_t cc_session_size; volatile int cc_refs; }; static struct cryptocap **crypto_drivers = NULL; static int crypto_drivers_size = 0; struct crypto_session { struct cryptocap *cap; void *softc; struct crypto_session_params csp; }; /* * There are two queues for crypto requests; one for symmetric (e.g. * cipher) operations and one for asymmetric (e.g. MOD)operations. * A single mutex is used to lock access to both queues. We could * have one per-queue but having one simplifies handling of block/unblock * operations. */ static int crp_sleep = 0; static TAILQ_HEAD(cryptop_q ,cryptop) crp_q; /* request queues */ static TAILQ_HEAD(,cryptkop) crp_kq; static struct mtx crypto_q_mtx; #define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx) #define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx) SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0, "In-kernel cryptography"); /* * Taskqueue used to dispatch the crypto requests * that have the CRYPTO_F_ASYNC flag */ static struct taskqueue *crypto_tq; /* * Crypto seq numbers are operated on with modular arithmetic */ #define CRYPTO_SEQ_GT(a,b) ((int)((a)-(b)) > 0) struct crypto_ret_worker { struct mtx crypto_ret_mtx; TAILQ_HEAD(,cryptop) crp_ordered_ret_q; /* ordered callback queue for symetric jobs */ TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queue for symetric jobs */ TAILQ_HEAD(,cryptkop) crp_ret_kq; /* callback queue for asym jobs */ u_int32_t reorder_ops; /* total ordered sym jobs received */ u_int32_t reorder_cur_seq; /* current sym job dispatched */ struct proc *cryptoretproc; }; static struct crypto_ret_worker *crypto_ret_workers = NULL; #define CRYPTO_RETW(i) (&crypto_ret_workers[i]) #define CRYPTO_RETW_ID(w) ((w) - crypto_ret_workers) #define FOREACH_CRYPTO_RETW(w) \ for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w) #define CRYPTO_RETW_LOCK(w) mtx_lock(&w->crypto_ret_mtx) #define CRYPTO_RETW_UNLOCK(w) mtx_unlock(&w->crypto_ret_mtx) #define CRYPTO_RETW_EMPTY(w) \ (TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q)) static int crypto_workers_num = 0; SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN, &crypto_workers_num, 0, "Number of crypto workers used to dispatch crypto jobs"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN, &crypto_workers_num, 0, "Number of crypto workers used to dispatch crypto jobs"); #endif static uma_zone_t cryptop_zone; static uma_zone_t cryptoses_zone; int crypto_userasymcrypto = 1; SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW, &crypto_userasymcrypto, 0, "Enable user-mode access to asymmetric crypto support"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW, &crypto_userasymcrypto, 0, "Enable/disable user-mode access to asymmetric crypto support"); #endif int crypto_devallowsoft = 0; SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW, &crypto_devallowsoft, 0, "Enable use of software crypto by /dev/crypto"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW, &crypto_devallowsoft, 0, "Enable/disable use of software crypto by 
/dev/crypto"); #endif MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records"); static void crypto_proc(void); static struct proc *cryptoproc; static void crypto_ret_proc(struct crypto_ret_worker *ret_worker); static void crypto_destroy(void); static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint); static int crypto_kinvoke(struct cryptkop *krp); static void crypto_task_invoke(void *ctx, int pending); static void crypto_batch_enqueue(struct cryptop *crp); static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)]; SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW, cryptostats, nitems(cryptostats), "Crypto system statistics"); #define CRYPTOSTAT_INC(stat) do { \ counter_u64_add( \ cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\ 1); \ } while (0) static void cryptostats_init(void *arg __unused) { COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK); } SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL); static void cryptostats_fini(void *arg __unused) { COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats)); } SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini, NULL); /* Try to avoid directly exposing the key buffer as a symbol */ static struct keybuf *keybuf; static struct keybuf empty_keybuf = { .kb_nents = 0 }; /* Obtain the key buffer from boot metadata */ static void keybuf_init(void) { caddr_t kmdp; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); keybuf = (struct keybuf *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_KEYBUF); if (keybuf == NULL) keybuf = &empty_keybuf; } /* It'd be nice if we could store these in some kind of secure memory... 
*/ struct keybuf * get_keybuf(void) { return (keybuf); } static struct cryptocap * cap_ref(struct cryptocap *cap) { refcount_acquire(&cap->cc_refs); return (cap); } static void cap_rele(struct cryptocap *cap) { if (refcount_release(&cap->cc_refs) == 0) return; KASSERT(cap->cc_sessions == 0, ("freeing crypto driver with active sessions")); KASSERT(cap->cc_koperations == 0, ("freeing crypto driver with active key operations")); free(cap, M_CRYPTO_DATA); } static int crypto_init(void) { struct crypto_ret_worker *ret_worker; int error; mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table", MTX_DEF|MTX_QUIET); TAILQ_INIT(&crp_q); TAILQ_INIT(&crp_kq); mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF); cryptop_zone = uma_zcreate("cryptop", sizeof(struct cryptop), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); cryptoses_zone = uma_zcreate("crypto_session", sizeof(struct crypto_session), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); crypto_drivers_size = CRYPTO_DRIVERS_INITIAL; crypto_drivers = malloc(crypto_drivers_size * sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus) crypto_workers_num = mp_ncpus; crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &crypto_tq); taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN, "crypto"); error = kproc_create((void (*)(void *)) crypto_proc, NULL, &cryptoproc, 0, 0, "crypto"); if (error) { printf("crypto_init: cannot start crypto thread; error %d", error); goto bad; } crypto_ret_workers = mallocarray(crypto_workers_num, sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO); FOREACH_CRYPTO_RETW(ret_worker) { TAILQ_INIT(&ret_worker->crp_ordered_ret_q); TAILQ_INIT(&ret_worker->crp_ret_q); TAILQ_INIT(&ret_worker->crp_ret_kq); ret_worker->reorder_ops = 0; ret_worker->reorder_cur_seq = 0; mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF); error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker, &ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker)); if (error) { printf("crypto_init: cannot start cryptoret thread; error %d", error); goto bad; } } keybuf_init(); return 0; bad: crypto_destroy(); return error; } /* * Signal a crypto thread to terminate. We use the driver * table lock to synchronize the sleep/wakeups so that we * are sure the threads have terminated before we release * the data structures they use. See crypto_finis below * for the other half of this song-and-dance. */ static void crypto_terminate(struct proc **pp, void *q) { struct proc *p; mtx_assert(&crypto_drivers_mtx, MA_OWNED); p = *pp; *pp = NULL; if (p) { wakeup_one(q); PROC_LOCK(p); /* NB: insure we don't miss wakeup */ CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */ msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0); PROC_UNLOCK(p); CRYPTO_DRIVER_LOCK(); } } static void hmac_init_pad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx, uint8_t padval) { uint8_t hmac_key[HMAC_MAX_BLOCK_LEN]; u_int i; KASSERT(axf->blocksize <= sizeof(hmac_key), ("Invalid HMAC block size %d", axf->blocksize)); /* * If the key is larger than the block size, use the digest of * the key as the key instead. 
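 * (This is the standard HMAC key preprocessing from RFC 2104: when
 * len(key) > blocksize, the digest of the key is used in its place, and
 * the result is zero-padded out to the block size before being XORed
 * with the pad value.)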
*/ memset(hmac_key, 0, sizeof(hmac_key)); if (klen > axf->blocksize) { axf->Init(auth_ctx); axf->Update(auth_ctx, key, klen); axf->Final(hmac_key, auth_ctx); klen = axf->hashsize; } else memcpy(hmac_key, key, klen); for (i = 0; i < axf->blocksize; i++) hmac_key[i] ^= padval; axf->Init(auth_ctx); axf->Update(auth_ctx, hmac_key, axf->blocksize); explicit_bzero(hmac_key, sizeof(hmac_key)); } void hmac_init_ipad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx) { hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL); } void hmac_init_opad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx) { hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL); } static void crypto_destroy(void) { struct crypto_ret_worker *ret_worker; int i; /* * Terminate any crypto threads. */ if (crypto_tq != NULL) taskqueue_drain_all(crypto_tq); CRYPTO_DRIVER_LOCK(); crypto_terminate(&cryptoproc, &crp_q); FOREACH_CRYPTO_RETW(ret_worker) crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q); CRYPTO_DRIVER_UNLOCK(); /* XXX flush queues??? */ /* * Reclaim dynamically allocated resources. */ for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] != NULL) cap_rele(crypto_drivers[i]); } free(crypto_drivers, M_CRYPTO_DATA); if (cryptoses_zone != NULL) uma_zdestroy(cryptoses_zone); if (cryptop_zone != NULL) uma_zdestroy(cryptop_zone); mtx_destroy(&crypto_q_mtx); FOREACH_CRYPTO_RETW(ret_worker) mtx_destroy(&ret_worker->crypto_ret_mtx); free(crypto_ret_workers, M_CRYPTO_DATA); if (crypto_tq != NULL) taskqueue_free(crypto_tq); mtx_destroy(&crypto_drivers_mtx); } uint32_t crypto_ses2hid(crypto_session_t crypto_session) { return (crypto_session->cap->cc_hid); } uint32_t crypto_ses2caps(crypto_session_t crypto_session) { return (crypto_session->cap->cc_flags & 0xff000000); } void * crypto_get_driver_session(crypto_session_t crypto_session) { return (crypto_session->softc); } const struct crypto_session_params * crypto_get_params(crypto_session_t crypto_session) { return (&crypto_session->csp); } struct auth_hash * crypto_auth_hash(const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: return (&auth_hash_hmac_sha1); case CRYPTO_SHA2_224_HMAC: return (&auth_hash_hmac_sha2_224); case CRYPTO_SHA2_256_HMAC: return (&auth_hash_hmac_sha2_256); case CRYPTO_SHA2_384_HMAC: return (&auth_hash_hmac_sha2_384); case CRYPTO_SHA2_512_HMAC: return (&auth_hash_hmac_sha2_512); case CRYPTO_NULL_HMAC: return (&auth_hash_null); case CRYPTO_RIPEMD160_HMAC: return (&auth_hash_hmac_ripemd_160); case CRYPTO_SHA1: return (&auth_hash_sha1); case CRYPTO_SHA2_224: return (&auth_hash_sha2_224); case CRYPTO_SHA2_256: return (&auth_hash_sha2_256); case CRYPTO_SHA2_384: return (&auth_hash_sha2_384); case CRYPTO_SHA2_512: return (&auth_hash_sha2_512); case CRYPTO_AES_NIST_GMAC: switch (csp->csp_auth_klen) { case 128 / 8: return (&auth_hash_nist_gmac_aes_128); case 192 / 8: return (&auth_hash_nist_gmac_aes_192); case 256 / 8: return (&auth_hash_nist_gmac_aes_256); default: return (NULL); } case CRYPTO_BLAKE2B: return (&auth_hash_blake2b); case CRYPTO_BLAKE2S: return (&auth_hash_blake2s); case CRYPTO_POLY1305: return (&auth_hash_poly1305); case CRYPTO_AES_CCM_CBC_MAC: switch (csp->csp_auth_klen) { case 128 / 8: return (&auth_hash_ccm_cbc_mac_128); case 192 / 8: return (&auth_hash_ccm_cbc_mac_192); case 256 / 8: return (&auth_hash_ccm_cbc_mac_256); default: return (NULL); } default: return (NULL); } } struct enc_xform * crypto_cipher(const struct crypto_session_params *csp) { 
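	/* Map the session's cipher algorithm to its transform descriptor. */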
switch (csp->csp_cipher_alg) { case CRYPTO_RIJNDAEL128_CBC: return (&enc_xform_rijndael128); case CRYPTO_AES_XTS: return (&enc_xform_aes_xts); case CRYPTO_AES_ICM: return (&enc_xform_aes_icm); case CRYPTO_AES_NIST_GCM_16: return (&enc_xform_aes_nist_gcm); case CRYPTO_CAMELLIA_CBC: return (&enc_xform_camellia); case CRYPTO_NULL_CBC: return (&enc_xform_null); case CRYPTO_CHACHA20: return (&enc_xform_chacha20); case CRYPTO_AES_CCM_16: return (&enc_xform_ccm); default: return (NULL); } } static struct cryptocap * crypto_checkdriver(u_int32_t hid) { return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]); } /* * Select a driver for a new session that supports the specified * algorithms and, optionally, is constrained according to the flags. */ static struct cryptocap * crypto_select_driver(const struct crypto_session_params *csp, int flags) { struct cryptocap *cap, *best; int best_match, error, hid; CRYPTO_DRIVER_ASSERT(); best = NULL; for (hid = 0; hid < crypto_drivers_size; hid++) { /* * If there is no driver for this slot, or the driver * is not appropriate (hardware or software based on * match), then skip. */ cap = crypto_drivers[hid]; if (cap == NULL || (cap->cc_flags & flags) == 0) continue; error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp); if (error >= 0) continue; /* * Use the driver with the highest probe value. * Hardware drivers use a higher probe value than * software. In case of a tie, prefer the driver with * the fewest active sessions. */ if (best == NULL || error > best_match || (error == best_match && cap->cc_sessions < best->cc_sessions)) { best = cap; best_match = error; } } return best; } static enum alg_type { ALG_NONE = 0, ALG_CIPHER, ALG_DIGEST, ALG_KEYED_DIGEST, ALG_COMPRESSION, ALG_AEAD } alg_types[] = { [CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_CBC] = ALG_CIPHER, [CRYPTO_SHA1] = ALG_DIGEST, [CRYPTO_NULL_HMAC] = ALG_DIGEST, [CRYPTO_NULL_CBC] = ALG_CIPHER, [CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION, [CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_CAMELLIA_CBC] = ALG_CIPHER, [CRYPTO_AES_XTS] = ALG_CIPHER, [CRYPTO_AES_ICM] = ALG_CIPHER, [CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_NIST_GCM_16] = ALG_AEAD, [CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST, [CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST, [CRYPTO_CHACHA20] = ALG_CIPHER, [CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_RIPEMD160] = ALG_DIGEST, [CRYPTO_SHA2_224] = ALG_DIGEST, [CRYPTO_SHA2_256] = ALG_DIGEST, [CRYPTO_SHA2_384] = ALG_DIGEST, [CRYPTO_SHA2_512] = ALG_DIGEST, [CRYPTO_POLY1305] = ALG_KEYED_DIGEST, [CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_CCM_16] = ALG_AEAD, }; static enum alg_type alg_type(int alg) { if (alg < nitems(alg_types)) return (alg_types[alg]); return (ALG_NONE); } static bool alg_is_compression(int alg) { return (alg_type(alg) == ALG_COMPRESSION); } static bool alg_is_cipher(int alg) { return (alg_type(alg) == ALG_CIPHER); } static bool alg_is_digest(int alg) { return (alg_type(alg) == ALG_DIGEST || alg_type(alg) == ALG_KEYED_DIGEST); } static bool alg_is_keyed_digest(int alg) { return (alg_type(alg) == ALG_KEYED_DIGEST); } static bool alg_is_aead(int alg) { return (alg_type(alg) == ALG_AEAD); } #define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN) /* Various sanity checks on crypto session parameters. 
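 * Each csp_mode has its own rules below: for example, ciphers other
 * than CRYPTO_NULL_CBC require both a key and an IV, digests may be
 * keyed or unkeyed (keys are optional for BLAKE2), and the AEAD modes
 * bound csp_auth_mlen by the cipher's tag size.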
*/ static bool check_csp(const struct crypto_session_params *csp) { struct auth_hash *axf; /* Mode-independent checks. */ if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0) return (false); if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 || csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0) return (false); if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0) return (false); if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0) return (false); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: if (!alg_is_compression(csp->csp_cipher_alg)) return (false); if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 || csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || csp->csp_auth_mlen != 0) return (false); break; case CSP_MODE_CIPHER: if (!alg_is_cipher(csp->csp_cipher_alg)) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0) return (false); } if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || csp->csp_auth_mlen != 0) return (false); break; case CSP_MODE_DIGEST: if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); /* IV is optional for digests (e.g. GMAC). */ if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (!alg_is_digest(csp->csp_auth_alg)) return (false); /* Key is optional for BLAKE2 digests. */ if (csp->csp_auth_alg == CRYPTO_BLAKE2B || csp->csp_auth_alg == CRYPTO_BLAKE2S) ; else if (alg_is_keyed_digest(csp->csp_auth_alg)) { if (csp->csp_auth_klen == 0) return (false); } else { if (csp->csp_auth_klen != 0) return (false); } if (csp->csp_auth_mlen != 0) { axf = crypto_auth_hash(csp); if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) return (false); } break; case CSP_MODE_AEAD: if (!alg_is_aead(csp->csp_cipher_alg)) return (false); if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0 || csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0) return (false); /* * XXX: Would be nice to have a better way to get this * value. */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: if (csp->csp_auth_mlen > 16) return (false); break; } break; case CSP_MODE_ETA: if (!alg_is_cipher(csp->csp_cipher_alg)) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0) return (false); } if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (!alg_is_digest(csp->csp_auth_alg)) return (false); /* Key is optional for BLAKE2 digests. */ if (csp->csp_auth_alg == CRYPTO_BLAKE2B || csp->csp_auth_alg == CRYPTO_BLAKE2S) ; else if (alg_is_keyed_digest(csp->csp_auth_alg)) { if (csp->csp_auth_klen == 0) return (false); } else { if (csp->csp_auth_klen != 0) return (false); } if (csp->csp_auth_mlen != 0) { axf = crypto_auth_hash(csp); if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) return (false); } break; default: return (false); } return (true); } /* * Delete a session after it has been detached from its driver. 
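 * Releasing the last session of a driver marked CRYPTOCAP_F_CLEANUP
 * wakes the thread sleeping in crypto_unregister_all().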
*/ static void crypto_deletesession(crypto_session_t cses) { struct cryptocap *cap; cap = cses->cap; zfree(cses->softc, M_CRYPTO_DATA); uma_zfree(cryptoses_zone, cses); CRYPTO_DRIVER_LOCK(); cap->cc_sessions--; if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) wakeup(cap); CRYPTO_DRIVER_UNLOCK(); cap_rele(cap); } /* * Create a new session. The crid argument specifies a crypto * driver to use or constraints on a driver to select (hardware * only, software only, either). Whatever driver is selected * must be capable of the requested crypto algorithms. */ int crypto_newsession(crypto_session_t *cses, const struct crypto_session_params *csp, int crid) { crypto_session_t res; struct cryptocap *cap; int err; if (!check_csp(csp)) return (EINVAL); res = NULL; CRYPTO_DRIVER_LOCK(); if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { /* * Use specified driver; verify it is capable. */ cap = crypto_checkdriver(crid); if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0) cap = NULL; } else { /* * No requested driver; select based on crid flags. */ cap = crypto_select_driver(csp, crid); } if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); CRYPTDEB("no driver"); return (EOPNOTSUPP); } cap_ref(cap); cap->cc_sessions++; CRYPTO_DRIVER_UNLOCK(); res = uma_zalloc(cryptoses_zone, M_WAITOK | M_ZERO); res->cap = cap; res->softc = malloc(cap->cc_session_size, M_CRYPTO_DATA, M_WAITOK | M_ZERO); res->csp = *csp; /* Call the driver initialization routine. */ err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp); if (err != 0) { CRYPTDEB("dev newsession failed: %d", err); crypto_deletesession(res); return (err); } *cses = res; return (0); } /* * Delete an existing session (or a reserved session on an unregistered * driver). */ void crypto_freesession(crypto_session_t cses) { struct cryptocap *cap; if (cses == NULL) return; cap = cses->cap; /* Call the driver cleanup routine, if available. */ CRYPTODEV_FREESESSION(cap->cc_dev, cses); crypto_deletesession(cses); } /* * Return a new driver id. Registers a driver with the system so that * it can be probed by subsequent sessions. */ int32_t crypto_get_driverid(device_t dev, size_t sessionsize, int flags) { struct cryptocap *cap, **newdrv; int i; if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { device_printf(dev, "no flags specified when registering driver\n"); return -1; } cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); cap->cc_dev = dev; cap->cc_session_size = sessionsize; cap->cc_flags = flags; refcount_init(&cap->cc_refs, 1); CRYPTO_DRIVER_LOCK(); for (;;) { for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] == NULL) break; } if (i < crypto_drivers_size) break; /* Out of entries, allocate some more. */ if (2 * crypto_drivers_size <= crypto_drivers_size) { CRYPTO_DRIVER_UNLOCK(); printf("crypto: driver count wraparound!\n"); cap_rele(cap); return (-1); } CRYPTO_DRIVER_UNLOCK(); newdrv = malloc(2 * crypto_drivers_size * sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO); CRYPTO_DRIVER_LOCK(); memcpy(newdrv, crypto_drivers, crypto_drivers_size * sizeof(*crypto_drivers)); crypto_drivers_size *= 2; free(crypto_drivers, M_CRYPTO_DATA); crypto_drivers = newdrv; } cap->cc_hid = i; crypto_drivers[i] = cap; CRYPTO_DRIVER_UNLOCK(); if (bootverbose) printf("crypto: assign %s driver id %u, flags 0x%x\n", device_get_nameunit(dev), i, flags); return i; } /* * Lookup a driver by name. We match against the full device * name and unit, and against just the name. 
The latter gives * us a simple widlcarding by device name. On success return the * driver/hardware identifier; otherwise return -1. */ int crypto_find_driver(const char *match) { struct cryptocap *cap; int i, len = strlen(match); CRYPTO_DRIVER_LOCK(); for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] == NULL) continue; cap = crypto_drivers[i]; if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 || strncmp(match, device_get_name(cap->cc_dev), len) == 0) { CRYPTO_DRIVER_UNLOCK(); return (i); } } CRYPTO_DRIVER_UNLOCK(); return (-1); } /* * Return the device_t for the specified driver or NULL * if the driver identifier is invalid. */ device_t crypto_find_device_byhid(int hid) { struct cryptocap *cap; device_t dev; dev = NULL; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(hid); if (cap != NULL) dev = cap->cc_dev; CRYPTO_DRIVER_UNLOCK(); return (dev); } /* * Return the device/driver capabilities. */ int crypto_getcaps(int hid) { struct cryptocap *cap; int flags; flags = 0; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(hid); if (cap != NULL) flags = cap->cc_flags; CRYPTO_DRIVER_UNLOCK(); return (flags); } /* * Register support for a key-related algorithm. This routine * is called once for each algorithm supported a driver. */ int crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags) { struct cryptocap *cap; int err; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL && (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) { /* * XXX Do some performance testing to determine placing. * XXX We probably need an auxiliary data structure that * XXX describes relative performances. */ cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; if (bootverbose) printf("crypto: %s registers key alg %u flags %u\n" , device_get_nameunit(cap->cc_dev) , kalg , flags ); + gone_in_dev(cap->cc_dev, 14, "asymmetric crypto"); err = 0; } else err = EINVAL; CRYPTO_DRIVER_UNLOCK(); return err; } /* * Unregister all algorithms associated with a crypto driver. * If there are pending sessions using it, leave enough information * around so that subsequent calls using those sessions will * correctly detect the driver has been unregistered and reroute * requests. */ int crypto_unregister_all(u_int32_t driverid) { struct cryptocap *cap; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); return (EINVAL); } cap->cc_flags |= CRYPTOCAP_F_CLEANUP; crypto_drivers[driverid] = NULL; /* * XXX: This doesn't do anything to kick sessions that * have no pending operations. */ while (cap->cc_sessions != 0 || cap->cc_koperations != 0) mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0); CRYPTO_DRIVER_UNLOCK(); cap_rele(cap); return (0); } /* * Clear blockage on a driver. The what parameter indicates whether * the driver is now ready for cryptop's and/or cryptokop's. 
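 * Drivers call this after having returned ERESTART so that queued
 * requests are dispatched to them again.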
*/ int crypto_unblock(u_int32_t driverid, int what) { struct cryptocap *cap; int err; CRYPTO_Q_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL) { if (what & CRYPTO_SYMQ) cap->cc_qblocked = 0; if (what & CRYPTO_ASYMQ) cap->cc_kqblocked = 0; if (crp_sleep) wakeup_one(&crp_q); err = 0; } else err = EINVAL; CRYPTO_Q_UNLOCK(); return err; } size_t crypto_buffer_len(struct crypto_buffer *cb) { switch (cb->cb_type) { case CRYPTO_BUF_CONTIG: return (cb->cb_buf_len); case CRYPTO_BUF_MBUF: if (cb->cb_mbuf->m_flags & M_PKTHDR) return (cb->cb_mbuf->m_pkthdr.len); return (m_length(cb->cb_mbuf, NULL)); case CRYPTO_BUF_VMPAGE: return (cb->cb_vm_page_len); case CRYPTO_BUF_UIO: return (cb->cb_uio->uio_resid); default: return (0); } } #ifdef INVARIANTS /* Various sanity checks on crypto requests. */ static void cb_sanity(struct crypto_buffer *cb, const char *name) { KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST, ("incoming crp with invalid %s buffer type", name)); switch (cb->cb_type) { case CRYPTO_BUF_CONTIG: KASSERT(cb->cb_buf_len >= 0, ("incoming crp with -ve %s buffer length", name)); break; case CRYPTO_BUF_VMPAGE: KASSERT(CRYPTO_HAS_VMPAGE, ("incoming crp uses dmap on supported arch")); KASSERT(cb->cb_vm_page_len >= 0, ("incoming crp with -ve %s buffer length", name)); KASSERT(cb->cb_vm_page_offset >= 0, ("incoming crp with -ve %s buffer offset", name)); KASSERT(cb->cb_vm_page_offset < PAGE_SIZE, ("incoming crp with %s buffer offset greater than page size" , name)); break; default: break; } } static void crp_sanity(struct cryptop *crp) { struct crypto_session_params *csp; struct crypto_buffer *out; size_t ilen, len, olen; KASSERT(crp->crp_session != NULL, ("incoming crp without a session")); KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE && crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST, ("incoming crp with invalid output buffer type")); KASSERT(crp->crp_etype == 0, ("incoming crp with error")); KASSERT(!(crp->crp_flags & CRYPTO_F_DONE), ("incoming crp already done")); csp = &crp->crp_session->csp; cb_sanity(&crp->crp_buf, "input"); ilen = crypto_buffer_len(&crp->crp_buf); olen = ilen; out = NULL; if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) { if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) { cb_sanity(&crp->crp_obuf, "output"); out = &crp->crp_obuf; olen = crypto_buffer_len(out); } } else KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE, ("incoming crp with separate output buffer " "but no session support")); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS || crp->crp_op == CRYPTO_OP_DECOMPRESS, ("invalid compression op %x", crp->crp_op)); break; case CSP_MODE_CIPHER: KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT || crp->crp_op == CRYPTO_OP_DECRYPT, ("invalid cipher op %x", crp->crp_op)); break; case CSP_MODE_DIGEST: KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST || crp->crp_op == CRYPTO_OP_VERIFY_DIGEST, ("invalid digest op %x", crp->crp_op)); break; case CSP_MODE_AEAD: KASSERT(crp->crp_op == (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || crp->crp_op == (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), ("invalid AEAD op %x", crp->crp_op)); if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, ("GCM without a separate IV")); if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16) KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, ("CCM without a separate IV")); break; case CSP_MODE_ETA: KASSERT(crp->crp_op == (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || crp->crp_op == (CRYPTO_OP_DECRYPT | 
CRYPTO_OP_VERIFY_DIGEST), ("invalid ETA op %x", crp->crp_op)); break; } if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { if (crp->crp_aad == NULL) { KASSERT(crp->crp_aad_start == 0 || crp->crp_aad_start < ilen, ("invalid AAD start")); KASSERT(crp->crp_aad_length != 0 || crp->crp_aad_start == 0, ("AAD with zero length and non-zero start")); KASSERT(crp->crp_aad_length == 0 || crp->crp_aad_start + crp->crp_aad_length <= ilen, ("AAD outside input length")); } else { KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD, ("session doesn't support separate AAD buffer")); KASSERT(crp->crp_aad_start == 0, ("separate AAD buffer with non-zero AAD start")); KASSERT(crp->crp_aad_length != 0, ("separate AAD buffer with zero length")); } } else { KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 && crp->crp_aad_length == 0, ("AAD region in request not supporting AAD")); } if (csp->csp_ivlen == 0) { KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0, ("IV_SEPARATE set when IV isn't used")); KASSERT(crp->crp_iv_start == 0, ("crp_iv_start set when IV isn't used")); } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) { KASSERT(crp->crp_iv_start == 0, ("IV_SEPARATE used with non-zero IV start")); } else { KASSERT(crp->crp_iv_start < ilen, ("invalid IV start")); KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen, ("IV outside buffer length")); } /* XXX: payload_start of 0 should always be < ilen? */ KASSERT(crp->crp_payload_start == 0 || crp->crp_payload_start < ilen, ("invalid payload start")); KASSERT(crp->crp_payload_start + crp->crp_payload_length <= ilen, ("payload outside input buffer")); if (out == NULL) { KASSERT(crp->crp_payload_output_start == 0, ("payload output start non-zero without output buffer")); } else { KASSERT(crp->crp_payload_output_start < olen, ("invalid payload output start")); KASSERT(crp->crp_payload_output_start + crp->crp_payload_length <= olen, ("payload outside output buffer")); } if (csp->csp_mode == CSP_MODE_DIGEST || csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) len = ilen; else len = olen; KASSERT(crp->crp_digest_start == 0 || crp->crp_digest_start < len, ("invalid digest start")); /* XXX: For the mlen == 0 case this check isn't perfect. */ KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len, ("digest outside buffer")); } else { KASSERT(crp->crp_digest_start == 0, ("non-zero digest start for request without a digest")); } if (csp->csp_cipher_klen != 0) KASSERT(csp->csp_cipher_key != NULL || crp->crp_cipher_key != NULL, ("cipher request without a key")); if (csp->csp_auth_klen != 0) KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL, ("auth request without a key")); KASSERT(crp->crp_callback != NULL, ("incoming crp without callback")); } #endif /* * Add a crypto request to a queue, to be processed by the kernel thread. 
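 * Requests flagged CRYPTO_F_ASYNC are handed to the taskqueue; requests
 * without CRYPTO_F_BATCH are invoked directly unless the driver is
 * blocked; everything else is queued for the crypto thread.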
*/ int crypto_dispatch(struct cryptop *crp) { struct cryptocap *cap; int result; #ifdef INVARIANTS crp_sanity(crp); #endif CRYPTOSTAT_INC(cs_ops); crp->crp_retw_id = ((uintptr_t)crp->crp_session) % crypto_workers_num; if (CRYPTOP_ASYNC(crp)) { if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) { struct crypto_ret_worker *ret_worker; ret_worker = CRYPTO_RETW(crp->crp_retw_id); CRYPTO_RETW_LOCK(ret_worker); crp->crp_seq = ret_worker->reorder_ops++; CRYPTO_RETW_UNLOCK(ret_worker); } TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp); taskqueue_enqueue(crypto_tq, &crp->crp_task); return (0); } if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) { /* * Caller marked the request to be processed * immediately; dispatch it directly to the * driver unless the driver is currently blocked. */ cap = crp->crp_session->cap; if (!cap->cc_qblocked) { result = crypto_invoke(cap, crp, 0); if (result != ERESTART) return (result); /* * The driver ran out of resources, put the request on * the queue. */ } } crypto_batch_enqueue(crp); return 0; } void crypto_batch_enqueue(struct cryptop *crp) { CRYPTO_Q_LOCK(); TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); if (crp_sleep) wakeup_one(&crp_q); CRYPTO_Q_UNLOCK(); } /* * Add an asymetric crypto request to a queue, * to be processed by the kernel thread. */ int crypto_kdispatch(struct cryptkop *krp) { int error; CRYPTOSTAT_INC(cs_kops); krp->krp_cap = NULL; error = crypto_kinvoke(krp); if (error == ERESTART) { CRYPTO_Q_LOCK(); TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); if (crp_sleep) wakeup_one(&crp_q); CRYPTO_Q_UNLOCK(); error = 0; } return error; } /* * Verify a driver is suitable for the specified operation. */ static __inline int kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp) { return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0; } /* * Select a driver for an asym operation. The driver must * support the necessary algorithm. The caller can constrain * which device is selected with the flags parameter. The * algorithm we use here is pretty stupid; just use the first * driver that supports the algorithms we need. If there are * multiple suitable drivers we choose the driver with the * fewest active operations. We prefer hardware-backed * drivers to software ones when either may be used. */ static struct cryptocap * crypto_select_kdriver(const struct cryptkop *krp, int flags) { struct cryptocap *cap, *best; int match, hid; CRYPTO_DRIVER_ASSERT(); /* * Look first for hardware crypto devices if permitted. */ if (flags & CRYPTOCAP_F_HARDWARE) match = CRYPTOCAP_F_HARDWARE; else match = CRYPTOCAP_F_SOFTWARE; best = NULL; again: for (hid = 0; hid < crypto_drivers_size; hid++) { /* * If there is no driver for this slot, or the driver * is not appropriate (hardware or software based on * match), then skip. */ cap = crypto_drivers[hid]; if (cap->cc_dev == NULL || (cap->cc_flags & match) == 0) continue; /* verify all the algorithms are supported. */ if (kdriver_suitable(cap, krp)) { if (best == NULL || cap->cc_koperations < best->cc_koperations) best = cap; } } if (best != NULL) return best; if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { /* sort of an Algol 68-style for loop */ match = CRYPTOCAP_F_SOFTWARE; goto again; } return best; } /* * Choose a driver for an asymmetric crypto request. */ static struct cryptocap * crypto_lookup_kdriver(struct cryptkop *krp) { struct cryptocap *cap; uint32_t crid; /* If this request is requeued, it might already have a driver. 
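 * (krp_cap is cleared, and its reference dropped, when a driver
 * unregisters, so that a fresh driver is selected on requeue.)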
*/ cap = krp->krp_cap; if (cap != NULL) return (cap); /* Use krp_crid to choose a driver. */ crid = krp->krp_crid; if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { cap = crypto_checkdriver(crid); if (cap != NULL) { /* * Driver present, it must support the * necessary algorithm and, if s/w drivers are * excluded, it must be registered as * hardware-backed. */ if (!kdriver_suitable(cap, krp) || (!crypto_devallowsoft && (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0)) cap = NULL; } } else { /* * No requested driver; select based on crid flags. */ if (!crypto_devallowsoft) /* NB: disallow s/w drivers */ crid &= ~CRYPTOCAP_F_SOFTWARE; cap = crypto_select_kdriver(krp, crid); } if (cap != NULL) { krp->krp_cap = cap_ref(cap); krp->krp_hid = cap->cc_hid; } return (cap); } /* * Dispatch an asymmetric crypto request. */ static int crypto_kinvoke(struct cryptkop *krp) { struct cryptocap *cap = NULL; int error; KASSERT(krp != NULL, ("%s: krp == NULL", __func__)); KASSERT(krp->krp_callback != NULL, ("%s: krp->crp_callback == NULL", __func__)); CRYPTO_DRIVER_LOCK(); cap = crypto_lookup_kdriver(krp); if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); krp->krp_status = ENODEV; crypto_kdone(krp); return (0); } /* * If the device is blocked, return ERESTART to requeue it. */ if (cap->cc_kqblocked) { /* * XXX: Previously this set krp_status to ERESTART and * invoked crypto_kdone but the caller would still * requeue it. */ CRYPTO_DRIVER_UNLOCK(); return (ERESTART); } cap->cc_koperations++; CRYPTO_DRIVER_UNLOCK(); error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0); if (error == ERESTART) { CRYPTO_DRIVER_LOCK(); cap->cc_koperations--; CRYPTO_DRIVER_UNLOCK(); return (error); } KASSERT(error == 0, ("error %d returned from crypto_kprocess", error)); return (0); } static void crypto_task_invoke(void *ctx, int pending) { struct cryptocap *cap; struct cryptop *crp; int result; crp = (struct cryptop *)ctx; cap = crp->crp_session->cap; result = crypto_invoke(cap, crp, 0); if (result == ERESTART) crypto_batch_enqueue(crp); } /* * Dispatch a crypto request to the appropriate crypto devices. */ static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint) { KASSERT(crp != NULL, ("%s: crp == NULL", __func__)); KASSERT(crp->crp_callback != NULL, ("%s: crp->crp_callback == NULL", __func__)); KASSERT(crp->crp_session != NULL, ("%s: crp->crp_session == NULL", __func__)); if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { struct crypto_session_params csp; crypto_session_t nses; /* * Driver has unregistered; migrate the session and return * an error to the caller so they'll resubmit the op. * * XXX: What if there are more already queued requests for this * session? * * XXX: Real solution is to make sessions refcounted * and force callers to hold a reference when * assigning to crp_session. Could maybe change * crypto_getreq to accept a session pointer to make * that work. Alternatively, we could abandon the * notion of rewriting crp_session in requests forcing * the caller to deal with allocating a new session. * Perhaps provide a method to allow a crp's session to * be swapped that callers could use. */ csp = crp->crp_session->csp; crypto_freesession(crp->crp_session); /* * XXX: Key pointers may no longer be valid. If we * really want to support this we need to define the * KPI such that 'csp' is required to be valid for the * duration of a session by the caller perhaps. * * XXX: If the keys have been changed this will reuse * the old keys. 
This probably suggests making * rekeying more explicit and updating the key * pointers in 'csp' when the keys change. */ if (crypto_newsession(&nses, &csp, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0) crp->crp_session = nses; crp->crp_etype = EAGAIN; crypto_done(crp); return 0; } else { /* * Invoke the driver to process the request. */ return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint); } } void crypto_destroyreq(struct cryptop *crp) { #ifdef DIAGNOSTIC { struct cryptop *crp2; struct crypto_ret_worker *ret_worker; CRYPTO_Q_LOCK(); TAILQ_FOREACH(crp2, &crp_q, crp_next) { KASSERT(crp2 != crp, ("Freeing cryptop from the crypto queue (%p).", crp)); } CRYPTO_Q_UNLOCK(); FOREACH_CRYPTO_RETW(ret_worker) { CRYPTO_RETW_LOCK(ret_worker); TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) { KASSERT(crp2 != crp, ("Freeing cryptop from the return queue (%p).", crp)); } CRYPTO_RETW_UNLOCK(ret_worker); } } #endif } void crypto_freereq(struct cryptop *crp) { if (crp == NULL) return; crypto_destroyreq(crp); uma_zfree(cryptop_zone, crp); } static void _crypto_initreq(struct cryptop *crp, crypto_session_t cses) { crp->crp_session = cses; } void crypto_initreq(struct cryptop *crp, crypto_session_t cses) { memset(crp, 0, sizeof(*crp)); _crypto_initreq(crp, cses); } struct cryptop * crypto_getreq(crypto_session_t cses, int how) { struct cryptop *crp; MPASS(how == M_WAITOK || how == M_NOWAIT); crp = uma_zalloc(cryptop_zone, how | M_ZERO); if (crp != NULL) _crypto_initreq(crp, cses); return (crp); } /* * Invoke the callback on behalf of the driver. */ void crypto_done(struct cryptop *crp) { KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0, ("crypto_done: op already done, flags 0x%x", crp->crp_flags)); crp->crp_flags |= CRYPTO_F_DONE; if (crp->crp_etype != 0) CRYPTOSTAT_INC(cs_errs); /* * CBIMM means unconditionally do the callback immediately; * CBIFSYNC means do the callback immediately only if the * operation was done synchronously. Both are used to avoid * doing extraneous context switches; the latter is mostly * used with the software crypto driver. */ if (!CRYPTOP_ASYNC_KEEPORDER(crp) && ((crp->crp_flags & CRYPTO_F_CBIMM) || ((crp->crp_flags & CRYPTO_F_CBIFSYNC) && (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) { /* * Do the callback directly. This is ok when the * callback routine does very little (e.g. the * /dev/crypto callback method just does a wakeup). */ crp->crp_callback(crp); } else { struct crypto_ret_worker *ret_worker; bool wake; ret_worker = CRYPTO_RETW(crp->crp_retw_id); wake = false; /* * Normal case; queue the callback for the thread. */ CRYPTO_RETW_LOCK(ret_worker); if (CRYPTOP_ASYNC_KEEPORDER(crp)) { struct cryptop *tmp; TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q, cryptop_q, crp_next) { if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) { TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q, tmp, crp, crp_next); break; } } if (tmp == NULL) { TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q, crp, crp_next); } if (crp->crp_seq == ret_worker->reorder_cur_seq) wake = true; } else { if (CRYPTO_RETW_EMPTY(ret_worker)) wake = true; TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next); } if (wake) wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ CRYPTO_RETW_UNLOCK(ret_worker); } } /* * Invoke the callback on behalf of the driver. 
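 * This is the asymmetric counterpart of crypto_done(): it drops the
 * driver reference and queues the callback on return worker 0.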
*/ void crypto_kdone(struct cryptkop *krp) { struct crypto_ret_worker *ret_worker; struct cryptocap *cap; if (krp->krp_status != 0) CRYPTOSTAT_INC(cs_kerrs); CRYPTO_DRIVER_LOCK(); cap = krp->krp_cap; KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0")); cap->cc_koperations--; if (cap->cc_koperations == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) wakeup(cap); CRYPTO_DRIVER_UNLOCK(); krp->krp_cap = NULL; cap_rele(cap); ret_worker = CRYPTO_RETW(0); CRYPTO_RETW_LOCK(ret_worker); if (CRYPTO_RETW_EMPTY(ret_worker)) wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next); CRYPTO_RETW_UNLOCK(ret_worker); } int crypto_getfeat(int *featp) { int hid, kalg, feat = 0; CRYPTO_DRIVER_LOCK(); for (hid = 0; hid < crypto_drivers_size; hid++) { const struct cryptocap *cap = crypto_drivers[hid]; if (cap == NULL || ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && !crypto_devallowsoft)) { continue; } for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED) feat |= 1 << kalg; } CRYPTO_DRIVER_UNLOCK(); *featp = feat; return (0); } /* * Terminate a thread at module unload. The process that * initiated this is waiting for us to signal that we're gone; * wake it up and exit. We use the driver table lock to insure * we don't do the wakeup before they're waiting. There is no * race here because the waiter sleeps on the proc lock for the * thread so it gets notified at the right time because of an * extra wakeup that's done in exit1(). */ static void crypto_finis(void *chan) { CRYPTO_DRIVER_LOCK(); wakeup_one(chan); CRYPTO_DRIVER_UNLOCK(); kproc_exit(0); } /* * Crypto thread, dispatches crypto requests. */ static void crypto_proc(void) { struct cryptop *crp, *submit; struct cryptkop *krp; struct cryptocap *cap; int result, hint; #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) fpu_kern_thread(FPU_KERN_NORMAL); #endif CRYPTO_Q_LOCK(); for (;;) { /* * Find the first element in the queue that can be * processed and look-ahead to see if multiple ops * are ready for the same driver. */ submit = NULL; hint = 0; TAILQ_FOREACH(crp, &crp_q, crp_next) { cap = crp->crp_session->cap; /* * Driver cannot disappeared when there is an active * session. */ KASSERT(cap != NULL, ("%s:%u Driver disappeared.", __func__, __LINE__)); if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { /* Op needs to be migrated, process it. */ if (submit == NULL) submit = crp; break; } if (!cap->cc_qblocked) { if (submit != NULL) { /* * We stop on finding another op, * regardless whether its for the same * driver or not. We could keep * searching the queue but it might be * better to just use a per-driver * queue instead. */ if (submit->crp_session->cap == cap) hint = CRYPTO_HINT_MORE; break; } else { submit = crp; if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) break; /* keep scanning for more are q'd */ } } } if (submit != NULL) { TAILQ_REMOVE(&crp_q, submit, crp_next); cap = submit->crp_session->cap; KASSERT(cap != NULL, ("%s:%u Driver disappeared.", __func__, __LINE__)); CRYPTO_Q_UNLOCK(); result = crypto_invoke(cap, submit, hint); CRYPTO_Q_LOCK(); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptop's and put * the request back in the queue. It would * best to put the request back where we got * it but that's hard so for now we put it * at the front. This should be ok; putting * it at the end does not work. 
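
/*
 * Illustrative user-space sketch, not part of this change: reading
 * the asymmetric feature mask assembled by crypto_getfeat() above,
 * where each supported operation is reported as bit (1 << CRK_*).
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <crypto/cryptodev.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	int fd, feat;

	fd = open("/dev/crypto", O_RDWR);
	if (fd < 0)
		err(1, "/dev/crypto");
	if (ioctl(fd, CRIOASYMFEAT, &feat) < 0)
		err(1, "CRIOASYMFEAT");
	if (feat & (1 << CRK_MOD_EXP))
		printf("modular exponentiation is supported\n");
	return (0);
}
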
*/ cap->cc_qblocked = 1; TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); CRYPTOSTAT_INC(cs_blocks); } } /* As above, but for key ops */ TAILQ_FOREACH(krp, &crp_kq, krp_next) { cap = krp->krp_cap; if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { /* * Operation needs to be migrated, * clear krp_cap so a new driver is * selected. */ krp->krp_cap = NULL; cap_rele(cap); break; } if (!cap->cc_kqblocked) break; } if (krp != NULL) { TAILQ_REMOVE(&crp_kq, krp, krp_next); CRYPTO_Q_UNLOCK(); result = crypto_kinvoke(krp); CRYPTO_Q_LOCK(); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptkop's and put * the request back in the queue. It would * best to put the request back where we got * it but that's hard so for now we put it * at the front. This should be ok; putting * it at the end does not work. */ krp->krp_cap->cc_kqblocked = 1; TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); CRYPTOSTAT_INC(cs_kblocks); } } if (submit == NULL && krp == NULL) { /* * Nothing more to be processed. Sleep until we're * woken because there are more ops to process. * This happens either by submission or by a driver * becoming unblocked and notifying us through * crypto_unblock. Note that when we wakeup we * start processing each queue again from the * front. It's not clear that it's important to * preserve this ordering since ops may finish * out of order if dispatched to different devices * and some become blocked while others do not. */ crp_sleep = 1; msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); crp_sleep = 0; if (cryptoproc == NULL) break; CRYPTOSTAT_INC(cs_intrs); } } CRYPTO_Q_UNLOCK(); crypto_finis(&crp_q); } /* * Crypto returns thread, does callbacks for processed crypto requests. * Callbacks are done here, rather than in the crypto drivers, because * callbacks typically are expensive and would slow interrupt handling. */ static void crypto_ret_proc(struct crypto_ret_worker *ret_worker) { struct cryptop *crpt; struct cryptkop *krpt; CRYPTO_RETW_LOCK(ret_worker); for (;;) { /* Harvest return q's for completed ops */ crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q); if (crpt != NULL) { if (crpt->crp_seq == ret_worker->reorder_cur_seq) { TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next); ret_worker->reorder_cur_seq++; } else { crpt = NULL; } } if (crpt == NULL) { crpt = TAILQ_FIRST(&ret_worker->crp_ret_q); if (crpt != NULL) TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next); } krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq); if (krpt != NULL) TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next); if (crpt != NULL || krpt != NULL) { CRYPTO_RETW_UNLOCK(ret_worker); /* * Run callbacks unlocked. */ if (crpt != NULL) crpt->crp_callback(crpt); if (krpt != NULL) krpt->krp_callback(krpt); CRYPTO_RETW_LOCK(ret_worker); } else { /* * Nothing more to be processed. Sleep until we're * woken because there are more returns to process. 
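
/*
 * Illustrative stand-alone sketch, not part of this change: the
 * "deliver only in sequence order" idea behind reorder_cur_seq and
 * the ordered return queue above, reduced to a plain TAILQ of
 * integers.  Completions arrive out of order; the harvester hands
 * one out only when its sequence number is the next expected value.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct comp {
	TAILQ_ENTRY(comp) link;
	unsigned seq;
};
TAILQ_HEAD(comp_q, comp);

/* Insert so that the queue stays sorted by ascending seq. */
static void
enqueue_ordered(struct comp_q *q, struct comp *c)
{
	struct comp *tmp;

	TAILQ_FOREACH_REVERSE(tmp, q, comp_q, link) {
		if (c->seq > tmp->seq) {
			TAILQ_INSERT_AFTER(q, tmp, c, link);
			return;
		}
	}
	TAILQ_INSERT_HEAD(q, c, link);
}

int
main(void)
{
	struct comp_q q = TAILQ_HEAD_INITIALIZER(q);
	unsigned order[] = { 2, 0, 3, 1 };
	unsigned next = 0;
	struct comp *c, *head;

	for (size_t i = 0; i < sizeof(order) / sizeof(order[0]); i++) {
		c = malloc(sizeof(*c));
		if (c == NULL)
			abort();
		c->seq = order[i];
		enqueue_ordered(&q, c);
		/* Deliver everything that is now in sequence. */
		while ((head = TAILQ_FIRST(&q)) != NULL &&
		    head->seq == next) {
			TAILQ_REMOVE(&q, head, link);
			printf("deliver %u\n", head->seq);
			free(head);
			next++;
		}
	}
	return (0);
}
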
*/ msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT, "crypto_ret_wait", 0); if (ret_worker->cryptoretproc == NULL) break; CRYPTOSTAT_INC(cs_rets); } } CRYPTO_RETW_UNLOCK(ret_worker); crypto_finis(&ret_worker->crp_ret_q); } #ifdef DDB static void db_show_drivers(void) { int hid; db_printf("%12s %4s %4s %8s %2s %2s\n" , "Device" , "Ses" , "Kops" , "Flags" , "QB" , "KB" ); for (hid = 0; hid < crypto_drivers_size; hid++) { const struct cryptocap *cap = crypto_drivers[hid]; if (cap == NULL) continue; db_printf("%-12s %4u %4u %08x %2u %2u\n" , device_get_nameunit(cap->cc_dev) , cap->cc_sessions , cap->cc_koperations , cap->cc_flags , cap->cc_qblocked , cap->cc_kqblocked ); } } DB_SHOW_COMMAND(crypto, db_show_crypto) { struct cryptop *crp; struct crypto_ret_worker *ret_worker; db_show_drivers(); db_printf("\n"); db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", "Device", "Callback"); TAILQ_FOREACH(crp, &crp_q, crp_next) { db_printf("%4u %08x %4u %4u %04x %8p %8p\n" , crp->crp_session->cap->cc_hid , (int) crypto_ses2caps(crp->crp_session) , crp->crp_olen , crp->crp_etype , crp->crp_flags , device_get_nameunit(crp->crp_session->cap->cc_dev) , crp->crp_callback ); } FOREACH_CRYPTO_RETW(ret_worker) { db_printf("\n%8s %4s %4s %4s %8s\n", "ret_worker", "HID", "Etype", "Flags", "Callback"); if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { db_printf("%8td %4u %4u %04x %8p\n" , CRYPTO_RETW_ID(ret_worker) , crp->crp_session->cap->cc_hid , crp->crp_etype , crp->crp_flags , crp->crp_callback ); } } } } DB_SHOW_COMMAND(kcrypto, db_show_kcrypto) { struct cryptkop *krp; struct crypto_ret_worker *ret_worker; db_show_drivers(); db_printf("\n"); db_printf("%4s %5s %4s %4s %8s %4s %8s\n", "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); TAILQ_FOREACH(krp, &crp_kq, krp_next) { db_printf("%4u %5u %4u %4u %08x %4u %8p\n" , krp->krp_op , krp->krp_status , krp->krp_iparams, krp->krp_oparams , krp->krp_crid, krp->krp_hid , krp->krp_callback ); } ret_worker = CRYPTO_RETW(0); if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { db_printf("%4s %5s %8s %4s %8s\n", "Op", "Status", "CRID", "HID", "Callback"); TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) { db_printf("%4u %5u %08x %4u %8p\n" , krp->krp_op , krp->krp_status , krp->krp_crid, krp->krp_hid , krp->krp_callback ); } } } #endif int crypto_modevent(module_t mod, int type, void *unused); /* * Initialization code, both for static and dynamic loading. * Note this is not invoked with the usual MODULE_DECLARE * mechanism but instead is listed as a dependency by the * cryptosoft driver. This guarantees proper ordering of * calls on module load/unload. 
*/ int crypto_modevent(module_t mod, int type, void *unused) { int error = EINVAL; switch (type) { case MOD_LOAD: error = crypto_init(); if (error == 0 && bootverbose) printf("crypto: \n"); break; case MOD_UNLOAD: /*XXX disallow if active sessions */ error = 0; crypto_destroy(); return 0; } return error; } MODULE_VERSION(crypto, 1); MODULE_DEPEND(crypto, zlib, 1, 1, 1); Index: head/sys/opencrypto/cryptodev.c =================================================================== --- head/sys/opencrypto/cryptodev.c (revision 366843) +++ head/sys/opencrypto/cryptodev.c (revision 366844) @@ -1,1694 +1,1708 @@ /* $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $ */ /*- * Copyright (c) 2001 Theo de Raadt * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DECLARE(opencrypto); SDT_PROBE_DEFINE1(opencrypto, dev, ioctl, error, "int"/*line number*/); #ifdef COMPAT_FREEBSD32 #include #include struct session_op32 { u_int32_t cipher; u_int32_t mac; u_int32_t keylen; u_int32_t key; int mackeylen; u_int32_t mackey; u_int32_t ses; }; struct session2_op32 { u_int32_t cipher; u_int32_t mac; u_int32_t keylen; u_int32_t key; int mackeylen; u_int32_t mackey; u_int32_t ses; int crid; int pad[4]; }; struct crypt_op32 { u_int32_t ses; u_int16_t op; u_int16_t flags; u_int len; u_int32_t src, dst; u_int32_t mac; u_int32_t iv; }; struct crypt_aead32 { u_int32_t ses; u_int16_t op; u_int16_t flags; u_int len; u_int aadlen; u_int ivlen; u_int32_t src; u_int32_t dst; u_int32_t aad; u_int32_t tag; u_int32_t iv; }; struct crparam32 { u_int32_t crp_p; u_int crp_nbits; }; struct crypt_kop32 { u_int crk_op; u_int crk_status; u_short crk_iparams; u_short crk_oparams; u_int crk_crid; struct crparam32 crk_param[CRK_MAXPARAM]; }; #define CIOCGSESSION32 _IOWR('c', 101, struct session_op32) #define CIOCCRYPT32 _IOWR('c', 103, struct crypt_op32) #define CIOCKEY32 _IOWR('c', 104, struct crypt_kop32) #define CIOCGSESSION232 _IOWR('c', 106, struct session2_op32) #define CIOCKEY232 _IOWR('c', 107, struct crypt_kop32) #define CIOCCRYPTAEAD32 _IOWR('c', 109, struct crypt_aead32) static void session_op_from_32(const struct session_op32 *from, struct session2_op *to) { memset(to, 0, sizeof(*to)); CP(*from, *to, cipher); CP(*from, *to, mac); CP(*from, *to, keylen); PTRIN_CP(*from, *to, key); CP(*from, *to, mackeylen); PTRIN_CP(*from, *to, mackey); CP(*from, *to, ses); to->crid = CRYPTOCAP_F_HARDWARE; } static void session2_op_from_32(const struct session2_op32 *from, struct session2_op *to) { session_op_from_32((const struct session_op32 *)from, to); CP(*from, *to, crid); } static void session_op_to_32(const struct session2_op *from, struct session_op32 *to) { CP(*from, *to, cipher); CP(*from, *to, mac); CP(*from, *to, keylen); PTROUT_CP(*from, *to, key); CP(*from, *to, mackeylen); PTROUT_CP(*from, *to, mackey); CP(*from, *to, ses); } static void session2_op_to_32(const struct session2_op *from, struct session2_op32 *to) { session_op_to_32(from, (struct session_op32 *)to); CP(*from, *to, crid); } static void crypt_op_from_32(const struct crypt_op32 *from, struct crypt_op *to) { CP(*from, *to, ses); CP(*from, *to, op); CP(*from, *to, flags); CP(*from, *to, len); PTRIN_CP(*from, *to, src); PTRIN_CP(*from, *to, dst); PTRIN_CP(*from, *to, mac); PTRIN_CP(*from, *to, iv); } static void crypt_op_to_32(const struct crypt_op *from, struct crypt_op32 *to) { CP(*from, *to, ses); CP(*from, *to, op); CP(*from, *to, flags); CP(*from, *to, len); PTROUT_CP(*from, *to, src); PTROUT_CP(*from, *to, dst); PTROUT_CP(*from, *to, mac); PTROUT_CP(*from, *to, iv); } static void crypt_aead_from_32(const struct crypt_aead32 *from, struct crypt_aead *to) { CP(*from, *to, ses); CP(*from, *to, op); CP(*from, *to, flags); CP(*from, *to, len); CP(*from, *to, aadlen); CP(*from, *to, ivlen); PTRIN_CP(*from, *to, src); PTRIN_CP(*from, *to, dst); PTRIN_CP(*from, *to, aad); PTRIN_CP(*from, *to, tag); PTRIN_CP(*from, *to, iv); } static void crypt_aead_to_32(const struct crypt_aead *from, struct crypt_aead32 *to) { CP(*from, *to, ses); CP(*from, *to, op); CP(*from, *to, flags); CP(*from, *to, len); 
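
	/*
	 * Illustrative note, not part of this change: CP() copies a
	 * field verbatim, while PTRIN_CP()/PTROUT_CP() additionally
	 * convert between a 32-bit integer handle and a native
	 * pointer.  The surrounding lines expand roughly to
	 *
	 *	to->len = from->len;
	 *	to->src = (u_int32_t)(uintptr_t)from->src;
	 *
	 * so each _from_32/_to_32 helper is just a field-by-field
	 * widening or narrowing of the ioctl argument structure.
	 */
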
CP(*from, *to, aadlen); CP(*from, *to, ivlen); PTROUT_CP(*from, *to, src); PTROUT_CP(*from, *to, dst); PTROUT_CP(*from, *to, aad); PTROUT_CP(*from, *to, tag); PTROUT_CP(*from, *to, iv); } static void crparam_from_32(const struct crparam32 *from, struct crparam *to) { PTRIN_CP(*from, *to, crp_p); CP(*from, *to, crp_nbits); } static void crparam_to_32(const struct crparam *from, struct crparam32 *to) { PTROUT_CP(*from, *to, crp_p); CP(*from, *to, crp_nbits); } static void crypt_kop_from_32(const struct crypt_kop32 *from, struct crypt_kop *to) { int i; CP(*from, *to, crk_op); CP(*from, *to, crk_status); CP(*from, *to, crk_iparams); CP(*from, *to, crk_oparams); CP(*from, *to, crk_crid); for (i = 0; i < CRK_MAXPARAM; i++) crparam_from_32(&from->crk_param[i], &to->crk_param[i]); } static void crypt_kop_to_32(const struct crypt_kop *from, struct crypt_kop32 *to) { int i; CP(*from, *to, crk_op); CP(*from, *to, crk_status); CP(*from, *to, crk_iparams); CP(*from, *to, crk_oparams); CP(*from, *to, crk_crid); for (i = 0; i < CRK_MAXPARAM; i++) crparam_to_32(&from->crk_param[i], &to->crk_param[i]); } #endif static void session2_op_from_op(const struct session_op *from, struct session2_op *to) { memset(to, 0, sizeof(*to)); memcpy(to, from, sizeof(*from)); to->crid = CRYPTOCAP_F_HARDWARE; } static void session2_op_to_op(const struct session2_op *from, struct session_op *to) { memcpy(to, from, sizeof(*to)); } struct csession { TAILQ_ENTRY(csession) next; crypto_session_t cses; volatile u_int refs; u_int32_t ses; struct mtx lock; /* for op submission */ struct enc_xform *txform; int hashsize; int ivsize; int mode; void *key; void *mackey; }; struct cryptop_data { struct csession *cse; char *buf; char *obuf; char *aad; bool done; }; struct fcrypt { TAILQ_HEAD(csessionlist, csession) csessions; int sesn; struct mtx lock; }; static bool use_outputbuffers; SYSCTL_BOOL(_kern_crypto, OID_AUTO, cryptodev_use_output, CTLFLAG_RW, &use_outputbuffers, 0, "Use separate output buffers for /dev/crypto requests."); static bool use_separate_aad; SYSCTL_BOOL(_kern_crypto, OID_AUTO, cryptodev_separate_aad, CTLFLAG_RW, &use_separate_aad, 0, "Use separate AAD buffer for /dev/crypto requests."); +static struct timeval warninterval = { .tv_sec = 60, .tv_usec = 0 }; +SYSCTL_TIMEVAL_SEC(_kern, OID_AUTO, cryptodev_warn_interval, CTLFLAG_RW, + &warninterval, + "Delay in seconds between warnings of deprecated /dev/crypto algorithms"); + static int cryptof_ioctl(struct file *, u_long, void *, struct ucred *, struct thread *); static int cryptof_stat(struct file *, struct stat *, struct ucred *, struct thread *); static int cryptof_close(struct file *, struct thread *); static int cryptof_fill_kinfo(struct file *, struct kinfo_file *, struct filedesc *); static struct fileops cryptofops = { .fo_read = invfo_rdwr, .fo_write = invfo_rdwr, .fo_truncate = invfo_truncate, .fo_ioctl = cryptof_ioctl, .fo_poll = invfo_poll, .fo_kqfilter = invfo_kqfilter, .fo_stat = cryptof_stat, .fo_close = cryptof_close, .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, .fo_sendfile = invfo_sendfile, .fo_fill_kinfo = cryptof_fill_kinfo, }; static struct csession *csefind(struct fcrypt *, u_int); static bool csedelete(struct fcrypt *, u_int); static struct csession *csecreate(struct fcrypt *, crypto_session_t, struct crypto_session_params *, struct enc_xform *, void *, struct auth_hash *, void *); static void csefree(struct csession *); static int cryptodev_op(struct csession *, struct crypt_op *, struct ucred *, struct thread *td); static int 
cryptodev_aead(struct csession *, struct crypt_aead *, struct ucred *, struct thread *); static int cryptodev_key(struct crypt_kop *); static int cryptodev_find(struct crypt_find_op *); /* * Check a crypto identifier to see if it requested * a software device/driver. This can be done either * by device name/class or through search constraints. */ static int checkforsoftware(int *cridp) { int crid; crid = *cridp; if (!crypto_devallowsoft) { if (crid & CRYPTOCAP_F_SOFTWARE) { if (crid & CRYPTOCAP_F_HARDWARE) { *cridp = CRYPTOCAP_F_HARDWARE; return 0; } return EINVAL; } if ((crid & CRYPTOCAP_F_HARDWARE) == 0 && (crypto_getcaps(crid) & CRYPTOCAP_F_HARDWARE) == 0) return EINVAL; } return 0; } /* ARGSUSED */ static int cryptof_ioctl( struct file *fp, u_long cmd, void *data, struct ucred *active_cred, struct thread *td) { + static struct timeval keywarn, featwarn; struct crypto_session_params csp; struct fcrypt *fcr = fp->f_data; struct csession *cse; struct session2_op *sop; struct crypt_op *cop; struct crypt_aead *caead; struct enc_xform *txform = NULL; struct auth_hash *thash = NULL; void *key = NULL; void *mackey = NULL; struct crypt_kop *kop; crypto_session_t cses; u_int32_t ses; int error = 0, crid; union { struct session2_op sopc; #ifdef COMPAT_FREEBSD32 struct crypt_op copc; struct crypt_aead aeadc; struct crypt_kop kopc; #endif } thunk; #ifdef COMPAT_FREEBSD32 u_long cmd32; void *data32; cmd32 = 0; data32 = NULL; switch (cmd) { case CIOCGSESSION32: cmd32 = cmd; data32 = data; cmd = CIOCGSESSION; data = &thunk.sopc; session_op_from_32((struct session_op32 *)data32, &thunk.sopc); break; case CIOCGSESSION232: cmd32 = cmd; data32 = data; cmd = CIOCGSESSION2; data = &thunk.sopc; session2_op_from_32((struct session2_op32 *)data32, &thunk.sopc); break; case CIOCCRYPT32: cmd32 = cmd; data32 = data; cmd = CIOCCRYPT; data = &thunk.copc; crypt_op_from_32((struct crypt_op32 *)data32, &thunk.copc); break; case CIOCCRYPTAEAD32: cmd32 = cmd; data32 = data; cmd = CIOCCRYPTAEAD; data = &thunk.aeadc; crypt_aead_from_32((struct crypt_aead32 *)data32, &thunk.aeadc); break; case CIOCKEY32: case CIOCKEY232: cmd32 = cmd; data32 = data; if (cmd == CIOCKEY32) cmd = CIOCKEY; else cmd = CIOCKEY2; data = &thunk.kopc; crypt_kop_from_32((struct crypt_kop32 *)data32, &thunk.kopc); break; } #endif switch (cmd) { case CIOCGSESSION: case CIOCGSESSION2: if (cmd == CIOCGSESSION) { session2_op_from_op(data, &thunk.sopc); sop = &thunk.sopc; } else sop = (struct session2_op *)data; switch (sop->cipher) { case 0: break; case CRYPTO_AES_CBC: txform = &enc_xform_rijndael128; break; case CRYPTO_AES_XTS: txform = &enc_xform_aes_xts; break; case CRYPTO_NULL_CBC: txform = &enc_xform_null; break; case CRYPTO_CAMELLIA_CBC: txform = &enc_xform_camellia; break; case CRYPTO_AES_ICM: txform = &enc_xform_aes_icm; break; case CRYPTO_AES_NIST_GCM_16: txform = &enc_xform_aes_nist_gcm; break; case CRYPTO_CHACHA20: txform = &enc_xform_chacha20; break; case CRYPTO_AES_CCM_16: txform = &enc_xform_ccm; break; default: CRYPTDEB("invalid cipher"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } switch (sop->mac) { case 0: break; case CRYPTO_POLY1305: thash = &auth_hash_poly1305; break; case CRYPTO_SHA1_HMAC: thash = &auth_hash_hmac_sha1; break; case CRYPTO_SHA2_224_HMAC: thash = &auth_hash_hmac_sha2_224; break; case CRYPTO_SHA2_256_HMAC: thash = &auth_hash_hmac_sha2_256; break; case CRYPTO_SHA2_384_HMAC: thash = &auth_hash_hmac_sha2_384; break; case CRYPTO_SHA2_512_HMAC: thash = &auth_hash_hmac_sha2_512; break; case 
CRYPTO_RIPEMD160_HMAC: thash = &auth_hash_hmac_ripemd_160; break; #ifdef COMPAT_FREEBSD12 case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: /* Should always be paired with GCM. */ if (sop->cipher != CRYPTO_AES_NIST_GCM_16) { CRYPTDEB("GMAC without GCM"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; #endif case CRYPTO_AES_NIST_GMAC: switch (sop->mackeylen * 8) { case 128: thash = &auth_hash_nist_gmac_aes_128; break; case 192: thash = &auth_hash_nist_gmac_aes_192; break; case 256: thash = &auth_hash_nist_gmac_aes_256; break; default: CRYPTDEB("invalid GMAC key length"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; case CRYPTO_AES_CCM_CBC_MAC: switch (sop->mackeylen) { case 16: thash = &auth_hash_ccm_cbc_mac_128; break; case 24: thash = &auth_hash_ccm_cbc_mac_192; break; case 32: thash = &auth_hash_ccm_cbc_mac_256; break; default: CRYPTDEB("Invalid CBC MAC key size %d", sop->keylen); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; case CRYPTO_SHA1: thash = &auth_hash_sha1; break; case CRYPTO_SHA2_224: thash = &auth_hash_sha2_224; break; case CRYPTO_SHA2_256: thash = &auth_hash_sha2_256; break; case CRYPTO_SHA2_384: thash = &auth_hash_sha2_384; break; case CRYPTO_SHA2_512: thash = &auth_hash_sha2_512; break; case CRYPTO_NULL_HMAC: thash = &auth_hash_null; break; case CRYPTO_BLAKE2B: thash = &auth_hash_blake2b; break; case CRYPTO_BLAKE2S: thash = &auth_hash_blake2s; break; default: CRYPTDEB("invalid mac"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } if (txform == NULL && thash == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } memset(&csp, 0, sizeof(csp)); if (use_outputbuffers) csp.csp_flags |= CSP_F_SEPARATE_OUTPUT; if (sop->cipher == CRYPTO_AES_NIST_GCM_16) { switch (sop->mac) { #ifdef COMPAT_FREEBSD12 case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: if (sop->keylen != sop->mackeylen) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; #endif case 0: break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } csp.csp_mode = CSP_MODE_AEAD; } else if (sop->cipher == CRYPTO_AES_CCM_16) { switch (sop->mac) { #ifdef COMPAT_FREEBSD12 case CRYPTO_AES_CCM_CBC_MAC: if (sop->keylen != sop->mackeylen) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } thash = NULL; break; #endif case 0: break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } csp.csp_mode = CSP_MODE_AEAD; } else if (txform && thash) csp.csp_mode = CSP_MODE_ETA; else if (txform) csp.csp_mode = CSP_MODE_CIPHER; else csp.csp_mode = CSP_MODE_DIGEST; switch (csp.csp_mode) { case CSP_MODE_AEAD: case CSP_MODE_ETA: if (use_separate_aad) csp.csp_flags |= CSP_F_SEPARATE_AAD; break; } if (txform) { csp.csp_cipher_alg = txform->type; csp.csp_cipher_klen = sop->keylen; if (sop->keylen > txform->maxkey || sop->keylen < txform->minkey) { CRYPTDEB("invalid cipher parameters"); error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } key = malloc(csp.csp_cipher_klen, M_XDATA, M_WAITOK); error = copyin(sop->key, key, csp.csp_cipher_klen); if (error) { CRYPTDEB("invalid key"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } csp.csp_cipher_key = key; csp.csp_ivlen = txform->ivsize; } if (thash) { csp.csp_auth_alg = thash->type; csp.csp_auth_klen = sop->mackeylen; 
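
	/*
	 * Illustrative worked instance, not part of this change, of
	 * the mapping this handler performs: sop->cipher =
	 * CRYPTO_AES_CBC with sop->keylen = 16 plus sop->mac =
	 * CRYPTO_SHA2_256_HMAC with sop->mackeylen = 32 selects both
	 * txform and thash, so csp.csp_mode becomes CSP_MODE_ETA with
	 * csp_cipher_klen = 16, csp_auth_klen = 32 and csp_ivlen =
	 * txform->ivsize (16 for AES-CBC).
	 */
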
if (sop->mackeylen > thash->keysize || sop->mackeylen < 0) { CRYPTDEB("invalid mac key length"); error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if (csp.csp_auth_klen) { mackey = malloc(csp.csp_auth_klen, M_XDATA, M_WAITOK); error = copyin(sop->mackey, mackey, csp.csp_auth_klen); if (error) { CRYPTDEB("invalid mac key"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } csp.csp_auth_key = mackey; } if (csp.csp_auth_alg == CRYPTO_AES_NIST_GMAC) csp.csp_ivlen = AES_GCM_IV_LEN; if (csp.csp_auth_alg == CRYPTO_AES_CCM_CBC_MAC) csp.csp_ivlen = AES_CCM_IV_LEN; } crid = sop->crid; error = checkforsoftware(&crid); if (error) { CRYPTDEB("checkforsoftware"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } error = crypto_newsession(&cses, &csp, crid); if (error) { CRYPTDEB("crypto_newsession"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } cse = csecreate(fcr, cses, &csp, txform, key, thash, mackey); if (cse == NULL) { crypto_freesession(cses); error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); CRYPTDEB("csecreate"); goto bail; } sop->ses = cse->ses; /* return hardware/driver id */ sop->crid = crypto_ses2hid(cse->cses); bail: if (error) { free(key, M_XDATA); free(mackey, M_XDATA); } if (cmd == CIOCGSESSION && error == 0) session2_op_to_op(sop, data); break; case CIOCFSESSION: ses = *(u_int32_t *)data; if (!csedelete(fcr, ses)) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } break; case CIOCCRYPT: cop = (struct crypt_op *)data; cse = csefind(fcr, cop->ses); if (cse == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } error = cryptodev_op(cse, cop, active_cred, td); csefree(cse); break; case CIOCKEY: case CIOCKEY2: + if (ratecheck(&keywarn, &warninterval)) + gone_in(14, + "Asymmetric crypto operations via /dev/crypto"); + if (!crypto_userasymcrypto) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EPERM); /* XXX compat? */ } kop = (struct crypt_kop *)data; if (cmd == CIOCKEY) { /* NB: crypto core enforces s/w driver use */ kop->crk_crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE; } mtx_lock(&Giant); error = cryptodev_key(kop); mtx_unlock(&Giant); break; case CIOCASYMFEAT: + if (ratecheck(&featwarn, &warninterval)) + gone_in(14, + "Asymmetric crypto features via /dev/crypto"); + if (!crypto_userasymcrypto) { /* * NB: if user asym crypto operations are * not permitted return "no algorithms" * so well-behaved applications will just * fallback to doing them in software. 
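
/*
 * Illustrative sketch, not part of this change: the rate-limited
 * deprecation warning pattern used for CIOCKEY/CIOCASYMFEAT above.
 * ratecheck(9) returns true at most once per `warninterval', so
 * gone_in(9) fires no more than once a minute regardless of how
 * often the ioctl is issued.  The function name is hypothetical.
 */
static void
example_warn_deprecated(void)
{
	static struct timeval lastwarn;

	if (ratecheck(&lastwarn, &warninterval))
		gone_in(14, "example deprecated /dev/crypto operation");
}
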
*/ *(int *)data = 0; } else { error = crypto_getfeat((int *)data); if (error) SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); } break; case CIOCFINDDEV: error = cryptodev_find((struct crypt_find_op *)data); break; case CIOCCRYPTAEAD: caead = (struct crypt_aead *)data; cse = csefind(fcr, caead->ses); if (cse == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } error = cryptodev_aead(cse, caead, active_cred, td); csefree(cse); break; default: error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); break; } #ifdef COMPAT_FREEBSD32 switch (cmd32) { case CIOCGSESSION32: if (error == 0) session_op_to_32(data, data32); break; case CIOCGSESSION232: if (error == 0) session2_op_to_32(data, data32); break; case CIOCCRYPT32: if (error == 0) crypt_op_to_32(data, data32); break; case CIOCCRYPTAEAD32: if (error == 0) crypt_aead_to_32(data, data32); break; case CIOCKEY32: case CIOCKEY232: crypt_kop_to_32(data, data32); break; } #endif return (error); } static int cryptodev_cb(struct cryptop *); static struct cryptop_data * cod_alloc(struct csession *cse, size_t aad_len, size_t len, struct thread *td) { struct cryptop_data *cod; cod = malloc(sizeof(struct cryptop_data), M_XDATA, M_WAITOK | M_ZERO); cod->cse = cse; if (crypto_get_params(cse->cses)->csp_flags & CSP_F_SEPARATE_AAD) { if (aad_len != 0) cod->aad = malloc(aad_len, M_XDATA, M_WAITOK); cod->buf = malloc(len, M_XDATA, M_WAITOK); } else cod->buf = malloc(aad_len + len, M_XDATA, M_WAITOK); if (crypto_get_params(cse->cses)->csp_flags & CSP_F_SEPARATE_OUTPUT) cod->obuf = malloc(len, M_XDATA, M_WAITOK); return (cod); } static void cod_free(struct cryptop_data *cod) { free(cod->aad, M_XDATA); free(cod->obuf, M_XDATA); free(cod->buf, M_XDATA); free(cod, M_XDATA); } static int cryptodev_op( struct csession *cse, struct crypt_op *cop, struct ucred *active_cred, struct thread *td) { struct cryptop_data *cod = NULL; struct cryptop *crp = NULL; int error; if (cop->len > 256*1024-4) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (E2BIG); } if (cse->txform) { if (cop->len == 0 || (cop->len % cse->txform->blocksize) != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } } if (cop->mac && cse->hashsize == 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } /* * The COP_F_CIPHER_FIRST flag predates explicit session * modes, but the only way it was used was for EtA so allow it * as long as it is consistent with EtA. 
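
/*
 * Illustrative user-space sketch, not part of this change: the
 * CIOCGSESSION/CIOCCRYPT round trip serviced by cryptof_ioctl() and
 * cryptodev_op() here.  Key, IV and payload values are placeholders
 * and error handling is minimal.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <crypto/cryptodev.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct session_op sop;
	struct crypt_op cop;
	unsigned char key[16] = "0123456789abcde";	/* AES-128 key */
	unsigned char iv[16];				/* AES block-size IV */
	unsigned char buf[64];				/* blocksize multiple */
	u_int32_t cfd;
	int fd;

	fd = open("/dev/crypto", O_RDWR);
	if (fd < 0)
		err(1, "/dev/crypto");
	if (ioctl(fd, CRIOGET, &cfd) < 0)	/* clone a crypto descriptor */
		err(1, "CRIOGET");
	close(fd);

	memset(iv, 0, sizeof(iv));
	memset(buf, 0, sizeof(buf));

	memset(&sop, 0, sizeof(sop));
	sop.cipher = CRYPTO_AES_CBC;
	sop.keylen = sizeof(key);
	sop.key = (void *)key;
	if (ioctl(cfd, CIOCGSESSION, &sop) < 0)
		err(1, "CIOCGSESSION");

	memset(&cop, 0, sizeof(cop));
	cop.ses = sop.ses;
	cop.op = COP_ENCRYPT;
	cop.len = sizeof(buf);
	cop.src = (void *)buf;
	cop.dst = (void *)buf;		/* encrypt in place */
	cop.iv = (void *)iv;
	if (ioctl(cfd, CIOCCRYPT, &cop) < 0)
		err(1, "CIOCCRYPT");

	(void)ioctl(cfd, CIOCFSESSION, &sop.ses);
	close(cfd);
	return (0);
}
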
*/ if (cop->flags & COP_F_CIPHER_FIRST) { if (cop->op != COP_ENCRYPT) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } } cod = cod_alloc(cse, 0, cop->len + cse->hashsize, td); crp = crypto_getreq(cse->cses, M_WAITOK); error = copyin(cop->src, cod->buf, cop->len); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } crp->crp_payload_start = 0; crp->crp_payload_length = cop->len; if (cse->hashsize) crp->crp_digest_start = cop->len; switch (cse->mode) { case CSP_MODE_COMPRESS: switch (cop->op) { case COP_ENCRYPT: crp->crp_op = CRYPTO_OP_COMPRESS; break; case COP_DECRYPT: crp->crp_op = CRYPTO_OP_DECOMPRESS; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } break; case CSP_MODE_CIPHER: switch (cop->op) { case COP_ENCRYPT: crp->crp_op = CRYPTO_OP_ENCRYPT; break; case COP_DECRYPT: crp->crp_op = CRYPTO_OP_DECRYPT; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } break; case CSP_MODE_DIGEST: switch (cop->op) { case 0: case COP_ENCRYPT: case COP_DECRYPT: crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST; if (cod->obuf != NULL) crp->crp_digest_start = 0; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } break; case CSP_MODE_ETA: switch (cop->op) { case COP_ENCRYPT: crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST; break; case COP_DECRYPT: crp->crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } crp->crp_flags = CRYPTO_F_CBIMM | (cop->flags & COP_F_BATCH); crypto_use_buf(crp, cod->buf, cop->len + cse->hashsize); if (cod->obuf) crypto_use_output_buf(crp, cod->obuf, cop->len + cse->hashsize); crp->crp_callback = cryptodev_cb; crp->crp_opaque = cod; if (cop->iv) { if (cse->ivsize == 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } error = copyin(cop->iv, crp->crp_iv, cse->ivsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } crp->crp_flags |= CRYPTO_F_IV_SEPARATE; } else if (cse->ivsize != 0) { crp->crp_iv_start = 0; crp->crp_payload_start += cse->ivsize; crp->crp_payload_length -= cse->ivsize; cop->dst += cse->ivsize; } if (cop->mac != NULL && crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { error = copyin(cop->mac, cod->buf + crp->crp_digest_start, cse->hashsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } again: /* * Let the dispatch run unlocked, then, interlock against the * callback before checking if the operation completed and going * to sleep. This insures drivers don't inherit our lock which * results in a lock order reversal between crypto_dispatch forced * entry and the crypto_done callback into us. */ error = crypto_dispatch(crp); if (error != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } mtx_lock(&cse->lock); while (!cod->done) mtx_sleep(cod, &cse->lock, PWAIT, "crydev", 0); mtx_unlock(&cse->lock); if (crp->crp_etype == EAGAIN) { crp->crp_etype = 0; crp->crp_flags &= ~CRYPTO_F_DONE; cod->done = false; goto again; } if (crp->crp_etype != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = crp->crp_etype; goto bail; } if (cop->dst != NULL) { error = copyout(cod->obuf != NULL ? 
cod->obuf : cod->buf + crp->crp_payload_start, cop->dst, crp->crp_payload_length); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } if (cop->mac != NULL && (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) == 0) { error = copyout((cod->obuf != NULL ? cod->obuf : cod->buf) + crp->crp_digest_start, cop->mac, cse->hashsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } bail: crypto_freereq(crp); cod_free(cod); return (error); } static int cryptodev_aead( struct csession *cse, struct crypt_aead *caead, struct ucred *active_cred, struct thread *td) { struct cryptop_data *cod = NULL; struct cryptop *crp = NULL; int error; if (caead->len > 256*1024-4 || caead->aadlen > 256*1024-4) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (E2BIG); } if (cse->txform == NULL || cse->hashsize == 0 || caead->tag == NULL || (caead->len % cse->txform->blocksize) != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } /* * The COP_F_CIPHER_FIRST flag predates explicit session * modes, but the only way it was used was for EtA so allow it * as long as it is consistent with EtA. */ if (caead->flags & COP_F_CIPHER_FIRST) { if (caead->op != COP_ENCRYPT) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } } cod = cod_alloc(cse, caead->aadlen, caead->len + cse->hashsize, td); crp = crypto_getreq(cse->cses, M_WAITOK); if (cod->aad != NULL) error = copyin(caead->aad, cod->aad, caead->aadlen); else error = copyin(caead->aad, cod->buf, caead->aadlen); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } crp->crp_aad = cod->aad; crp->crp_aad_start = 0; crp->crp_aad_length = caead->aadlen; if (cod->aad != NULL) crp->crp_payload_start = 0; else crp->crp_payload_start = caead->aadlen; error = copyin(caead->src, cod->buf + crp->crp_payload_start, caead->len); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } crp->crp_payload_length = caead->len; if (caead->op == COP_ENCRYPT && cod->obuf != NULL) crp->crp_digest_start = crp->crp_payload_output_start + caead->len; else crp->crp_digest_start = crp->crp_payload_start + caead->len; switch (cse->mode) { case CSP_MODE_AEAD: case CSP_MODE_ETA: switch (caead->op) { case COP_ENCRYPT: crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST; break; case COP_DECRYPT: crp->crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST; break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } break; default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } crp->crp_flags = CRYPTO_F_CBIMM | (caead->flags & COP_F_BATCH); crypto_use_buf(crp, cod->buf, crp->crp_payload_start + caead->len + cse->hashsize); if (cod->obuf != NULL) crypto_use_output_buf(crp, cod->obuf, caead->len + cse->hashsize); crp->crp_callback = cryptodev_cb; crp->crp_opaque = cod; if (caead->iv) { /* * Permit a 16-byte IV for AES-XTS, but only use the * first 8 bytes as a block number. 
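
/*
 * Illustrative user-space sketch, not part of this change: an
 * AES-GCM encryption issued through CIOCCRYPTAEAD against the
 * handler above.  Assumes `cfd' is a crypto descriptor from CRIOGET
 * and `ses' an AES-GCM session (sop.cipher = CRYPTO_AES_NIST_GCM_16);
 * needs <sys/ioctl.h>, <crypto/cryptodev.h> and <string.h>.
 */
static int
example_gcm_encrypt(int cfd, u_int32_t ses)
{
	struct crypt_aead ca;
	static unsigned char aad[13];		/* associated data */
	static unsigned char iv[12];		/* 96-bit GCM nonce */
	static unsigned char buf[64];		/* payload, in place */
	static unsigned char tag[16];		/* written back on encrypt */

	memset(&ca, 0, sizeof(ca));
	ca.ses = ses;
	ca.op = COP_ENCRYPT;
	ca.len = sizeof(buf);
	ca.aadlen = sizeof(aad);
	ca.ivlen = sizeof(iv);		/* must match the session IV size */
	ca.src = (void *)buf;
	ca.dst = (void *)buf;
	ca.aad = (void *)aad;
	ca.tag = (void *)tag;
	ca.iv = (void *)iv;
	return (ioctl(cfd, CIOCCRYPTAEAD, &ca));
}
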
*/ if (cse->mode == CSP_MODE_ETA && caead->ivlen == AES_BLOCK_LEN && cse->ivsize == AES_XTS_IV_LEN) caead->ivlen = AES_XTS_IV_LEN; if (caead->ivlen != cse->ivsize) { error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } error = copyin(caead->iv, crp->crp_iv, cse->ivsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } crp->crp_flags |= CRYPTO_F_IV_SEPARATE; } else { crp->crp_iv_start = crp->crp_payload_start; crp->crp_payload_start += cse->ivsize; crp->crp_payload_length -= cse->ivsize; caead->dst += cse->ivsize; } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { error = copyin(caead->tag, cod->buf + crp->crp_digest_start, cse->hashsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } again: /* * Let the dispatch run unlocked, then, interlock against the * callback before checking if the operation completed and going * to sleep. This insures drivers don't inherit our lock which * results in a lock order reversal between crypto_dispatch forced * entry and the crypto_done callback into us. */ error = crypto_dispatch(crp); if (error != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } mtx_lock(&cse->lock); while (!cod->done) mtx_sleep(cod, &cse->lock, PWAIT, "crydev", 0); mtx_unlock(&cse->lock); if (crp->crp_etype == EAGAIN) { crp->crp_etype = 0; crp->crp_flags &= ~CRYPTO_F_DONE; cod->done = false; goto again; } if (crp->crp_etype != 0) { error = crp->crp_etype; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if (caead->dst != NULL) { error = copyout(cod->obuf != NULL ? cod->obuf : cod->buf + crp->crp_payload_start, caead->dst, crp->crp_payload_length); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) == 0) { error = copyout((cod->obuf != NULL ? cod->obuf : cod->buf) + crp->crp_digest_start, caead->tag, cse->hashsize); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } bail: crypto_freereq(crp); cod_free(cod); return (error); } static int cryptodev_cb(struct cryptop *crp) { struct cryptop_data *cod = crp->crp_opaque; /* * Lock to ensure the wakeup() is not missed by the loops * waiting on cod->done in cryptodev_op() and * cryptodev_aead(). 
*/ mtx_lock(&cod->cse->lock); cod->done = true; mtx_unlock(&cod->cse->lock); wakeup(cod); return (0); } static void cryptodevkey_cb(struct cryptkop *krp) { wakeup_one(krp); } static int cryptodev_key(struct crypt_kop *kop) { struct cryptkop *krp = NULL; int error = EINVAL; int in, out, size, i; if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EFBIG); } in = kop->crk_iparams; out = kop->crk_oparams; switch (kop->crk_op) { case CRK_MOD_EXP: if (in == 3 && out == 1) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); case CRK_MOD_EXP_CRT: if (in == 6 && out == 1) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); case CRK_DSA_SIGN: if (in == 5 && out == 2) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); case CRK_DSA_VERIFY: if (in == 7 && out == 0) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); case CRK_DH_COMPUTE_KEY: if (in == 3 && out == 1) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } krp = malloc(sizeof(*krp), M_XDATA, M_WAITOK | M_ZERO); krp->krp_op = kop->crk_op; krp->krp_status = kop->crk_status; krp->krp_iparams = kop->crk_iparams; krp->krp_oparams = kop->crk_oparams; krp->krp_crid = kop->crk_crid; krp->krp_status = 0; krp->krp_callback = cryptodevkey_cb; for (i = 0; i < CRK_MAXPARAM; i++) { if (kop->crk_param[i].crp_nbits > 65536) { /* Limit is the same as in OpenBSD */ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits; } for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) { size = (krp->krp_param[i].crp_nbits + 7) / 8; if (size == 0) continue; krp->krp_param[i].crp_p = malloc(size, M_XDATA, M_WAITOK); if (i >= krp->krp_iparams) continue; error = copyin(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p, size); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } } error = crypto_kdispatch(krp); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } error = tsleep(krp, PSOCK, "crydev", 0); if (error) { /* XXX can this happen? if so, how do we recover? 
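
/*
 * Illustrative user-space sketch, not part of this change: a
 * CRK_MOD_EXP request matching the 3-input/1-output parameter check
 * in cryptodev_key() above.  The base/exponent/modulus input order
 * and the little-endian byte layout are assumptions carried over
 * from the traditional OpenBSD interface; values are placeholders.
 */
static int
example_mod_exp(int cfd)
{
	struct crypt_kop kop;
	static unsigned char base[4] = { 2 };		/* 2 */
	static unsigned char exp[4] = { 10 };		/* 10 */
	static unsigned char mod[4] = { 0xe9, 0x03 };	/* 1001 */
	static unsigned char res[4];			/* 2^10 mod 1001 */

	memset(&kop, 0, sizeof(kop));
	kop.crk_op = CRK_MOD_EXP;
	kop.crk_iparams = 3;
	kop.crk_oparams = 1;
	kop.crk_param[0].crp_p = (void *)base;
	kop.crk_param[0].crp_nbits = sizeof(base) * 8;
	kop.crk_param[1].crp_p = (void *)exp;
	kop.crk_param[1].crp_nbits = sizeof(exp) * 8;
	kop.crk_param[2].crp_p = (void *)mod;
	kop.crk_param[2].crp_nbits = sizeof(mod) * 8;
	kop.crk_param[3].crp_p = (void *)res;
	kop.crk_param[3].crp_nbits = sizeof(res) * 8;
	if (ioctl(cfd, CIOCKEY, &kop) < 0)
		return (-1);
	return (kop.crk_status);
}
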
*/ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } kop->crk_crid = krp->krp_hid; /* device that did the work */ if (krp->krp_status != 0) { error = krp->krp_status; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) { size = (krp->krp_param[i].crp_nbits + 7) / 8; if (size == 0) continue; error = copyout(krp->krp_param[i].crp_p, kop->crk_param[i].crp_p, size); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } } fail: if (krp) { kop->crk_status = krp->krp_status; for (i = 0; i < CRK_MAXPARAM; i++) { if (krp->krp_param[i].crp_p) free(krp->krp_param[i].crp_p, M_XDATA); } free(krp, M_XDATA); } return (error); } static int cryptodev_find(struct crypt_find_op *find) { device_t dev; size_t fnlen = sizeof find->name; if (find->crid != -1) { dev = crypto_find_device_byhid(find->crid); if (dev == NULL) return (ENOENT); strncpy(find->name, device_get_nameunit(dev), fnlen); find->name[fnlen - 1] = '\x0'; } else { find->name[fnlen - 1] = '\x0'; find->crid = crypto_find_driver(find->name); if (find->crid == -1) return (ENOENT); } return (0); } /* ARGSUSED */ static int cryptof_stat( struct file *fp, struct stat *sb, struct ucred *active_cred, struct thread *td) { return (EOPNOTSUPP); } /* ARGSUSED */ static int cryptof_close(struct file *fp, struct thread *td) { struct fcrypt *fcr = fp->f_data; struct csession *cse; while ((cse = TAILQ_FIRST(&fcr->csessions))) { TAILQ_REMOVE(&fcr->csessions, cse, next); KASSERT(cse->refs == 1, ("%s: crypto session %p with %d refs", __func__, cse, cse->refs)); csefree(cse); } free(fcr, M_XDATA); fp->f_data = NULL; return 0; } static int cryptof_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { kif->kf_type = KF_TYPE_CRYPTO; return (0); } static struct csession * csefind(struct fcrypt *fcr, u_int ses) { struct csession *cse; mtx_lock(&fcr->lock); TAILQ_FOREACH(cse, &fcr->csessions, next) { if (cse->ses == ses) { refcount_acquire(&cse->refs); mtx_unlock(&fcr->lock); return (cse); } } mtx_unlock(&fcr->lock); return (NULL); } static bool csedelete(struct fcrypt *fcr, u_int ses) { struct csession *cse; mtx_lock(&fcr->lock); TAILQ_FOREACH(cse, &fcr->csessions, next) { if (cse->ses == ses) { TAILQ_REMOVE(&fcr->csessions, cse, next); mtx_unlock(&fcr->lock); csefree(cse); return (true); } } mtx_unlock(&fcr->lock); return (false); } struct csession * csecreate(struct fcrypt *fcr, crypto_session_t cses, struct crypto_session_params *csp, struct enc_xform *txform, void *key, struct auth_hash *thash, void *mackey) { struct csession *cse; cse = malloc(sizeof(struct csession), M_XDATA, M_NOWAIT | M_ZERO); if (cse == NULL) return NULL; mtx_init(&cse->lock, "cryptodev", "crypto session lock", MTX_DEF); refcount_init(&cse->refs, 1); cse->key = key; cse->mackey = mackey; cse->mode = csp->csp_mode; cse->cses = cses; cse->txform = txform; if (thash != NULL) cse->hashsize = thash->hashsize; else if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) cse->hashsize = AES_GMAC_HASH_LEN; else if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16) cse->hashsize = AES_CBC_MAC_HASH_LEN; cse->ivsize = csp->csp_ivlen; mtx_lock(&fcr->lock); TAILQ_INSERT_TAIL(&fcr->csessions, cse, next); cse->ses = fcr->sesn++; mtx_unlock(&fcr->lock); return (cse); } static void csefree(struct csession *cse) { if (!refcount_release(&cse->refs)) return; crypto_freesession(cse->cses); mtx_destroy(&cse->lock); if (cse->key) free(cse->key, M_XDATA); if (cse->mackey) 
		free(cse->mackey, M_XDATA);
	free(cse, M_XDATA);
}

static int
cryptoioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
    struct thread *td)
{
	struct file *f;
	struct fcrypt *fcr;
	int fd, error;

	switch (cmd) {
	case CRIOGET:
		error = falloc_noinstall(td, &f);
		if (error)
			break;

		fcr = malloc(sizeof(struct fcrypt), M_XDATA, M_WAITOK | M_ZERO);
		TAILQ_INIT(&fcr->csessions);
		mtx_init(&fcr->lock, "fcrypt", NULL, MTX_DEF);

		finit(f, FREAD | FWRITE, DTYPE_CRYPTO, fcr, &cryptofops);
		error = finstall(td, f, &fd, 0, NULL);
		if (error) {
			mtx_destroy(&fcr->lock);
			free(fcr, M_XDATA);
		} else
			*(uint32_t *)data = fd;
		fdrop(f, td);
		break;
	case CRIOFINDDEV:
		error = cryptodev_find((struct crypt_find_op *)data);
		break;
	case CRIOASYMFEAT:
		error = crypto_getfeat((int *)data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static struct cdevsw crypto_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	cryptoioctl,
	.d_name =	"crypto",
};
static struct cdev *crypto_dev;

/*
 * Initialization code, both for static and dynamic loading.
 */
static int
cryptodev_modevent(module_t mod, int type, void *unused)
{
	switch (type) {
	case MOD_LOAD:
		if (bootverbose)
			printf("crypto: <crypto device>\n");
		crypto_dev = make_dev(&crypto_cdevsw, 0,
		    UID_ROOT, GID_WHEEL, 0666, "crypto");
		return 0;
	case MOD_UNLOAD:
		/* XXX disallow if active sessions */
		destroy_dev(crypto_dev);
		return 0;
	}
	return EINVAL;
}

static moduledata_t cryptodev_mod = {
	"cryptodev",
	cryptodev_modevent,
	0
};
MODULE_VERSION(cryptodev, 1);
DECLARE_MODULE(cryptodev, cryptodev_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_DEPEND(cryptodev, crypto, 1, 1, 1);
MODULE_DEPEND(cryptodev, zlib, 1, 1, 1);
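
/*
 * Illustrative user-space sketch, not part of this change: resolving
 * a driver name to its id with CRIOFINDDEV, the /dev/crypto analogue
 * of the CIOCFINDDEV handling in cryptodev_find() above.  The driver
 * name "aesni0" is only an example.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <crypto/cryptodev.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct crypt_find_op fop;
	int fd;

	fd = open("/dev/crypto", O_RDWR);
	if (fd < 0)
		err(1, "/dev/crypto");
	memset(&fop, 0, sizeof(fop));
	fop.crid = -1;			/* -1 means: look up by name */
	strlcpy(fop.name, "aesni0", sizeof(fop.name));
	if (ioctl(fd, CRIOFINDDEV, &fop) < 0)
		err(1, "CRIOFINDDEV");
	printf("%s has driver id %d\n", fop.name, fop.crid);
	return (0);
}
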