Index: head/share/man/man9/crypto.9 =================================================================== --- head/share/man/man9/crypto.9 (revision 336438) +++ head/share/man/man9/crypto.9 (revision 336439) @@ -1,722 +1,732 @@ .\" $OpenBSD: crypto.9,v 1.19 2002/07/16 06:31:57 angelos Exp $ .\" .\" The author of this manual page is Angelos D. Keromytis (angelos@cis.upenn.edu) .\" .\" Copyright (c) 2000, 2001 Angelos D. Keromytis .\" .\" Permission to use, copy, and modify this software with or without fee .\" is hereby granted, provided that this entire notice is included in .\" all source code copies of any software which is or includes a copy or .\" modification of this software. .\" .\" THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR .\" IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY .\" REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE .\" MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR .\" PURPOSE. .\" .\" $FreeBSD$ .\" -.Dd November 6, 2017 +.Dd July 17, 2018 .Dt CRYPTO 9 .Os .Sh NAME .Nm crypto .Nd API for cryptographic services in the kernel .Sh SYNOPSIS .In opencrypto/cryptodev.h .Ft int32_t -.Fn crypto_get_driverid device_t int +.Fn crypto_get_driverid device_t size_t int .Ft int .Fn crypto_register uint32_t int uint16_t uint32_t "int \*[lp]*\*[rp]\*[lp]void *, uint32_t *, struct cryptoini *\*[rp]" "int \*[lp]*\*[rp]\*[lp]void *, uint64_t\*[rp]" "int \*[lp]*\*[rp]\*[lp]void *, struct cryptop *\*[rp]" "void *" .Ft int .Fn crypto_kregister uint32_t int uint32_t "int \*[lp]*\*[rp]\*[lp]void *, struct cryptkop *\*[rp]" "void *" .Ft int .Fn crypto_unregister uint32_t int .Ft int .Fn crypto_unregister_all uint32_t .Ft void .Fn crypto_done "struct cryptop *" .Ft void .Fn crypto_kdone "struct cryptkop *" .Ft int .Fn crypto_find_driver "const char *" .Ft int -.Fn crypto_newsession "uint64_t *" "struct cryptoini *" int +.Fn crypto_newsession "crypto_session_t *" "struct cryptoini *" int .Ft int -.Fn crypto_freesession uint64_t +.Fn crypto_freesession crypto_session_t .Ft int .Fn crypto_dispatch "struct cryptop *" .Ft int .Fn crypto_kdispatch "struct cryptkop *" .Ft int .Fn crypto_unblock uint32_t int .Ft "struct cryptop *" .Fn crypto_getreq int .Ft void .Fn crypto_freereq void .Bd -literal #define CRYPTO_SYMQ 0x1 #define CRYPTO_ASYMQ 0x2 #define EALG_MAX_BLOCK_LEN 16 struct cryptoini { int cri_alg; int cri_klen; int cri_mlen; caddr_t cri_key; uint8_t cri_iv[EALG_MAX_BLOCK_LEN]; struct cryptoini *cri_next; }; struct cryptodesc { int crd_skip; int crd_len; int crd_inject; int crd_flags; struct cryptoini CRD_INI; #define crd_iv CRD_INI.cri_iv #define crd_key CRD_INI.cri_key #define crd_alg CRD_INI.cri_alg #define crd_klen CRD_INI.cri_klen struct cryptodesc *crd_next; }; struct cryptop { TAILQ_ENTRY(cryptop) crp_next; - uint64_t crp_sid; + crypto_session_t crp_session; int crp_ilen; int crp_olen; int crp_etype; int crp_flags; caddr_t crp_buf; caddr_t crp_opaque; struct cryptodesc *crp_desc; int (*crp_callback) (struct cryptop *); caddr_t crp_mac; }; struct crparam { caddr_t crp_p; u_int crp_nbits; }; #define CRK_MAXPARAM 8 struct cryptkop { TAILQ_ENTRY(cryptkop) krp_next; u_int krp_op; /* ie. 
CRK_MOD_EXP or other */ u_int krp_status; /* return status */ u_short krp_iparams; /* # of input parameters */ u_short krp_oparams; /* # of output parameters */ uint32_t krp_hid; struct crparam krp_param[CRK_MAXPARAM]; int (*krp_callback)(struct cryptkop *); }; .Ed .Sh DESCRIPTION .Nm is a framework for drivers of cryptographic hardware to register with the kernel so .Dq consumers (other kernel subsystems, and users through the .Pa /dev/crypto device) are able to make use of it. Drivers register with the framework the algorithms they support, and provide entry points (functions) the framework may call to establish, use, and tear down sessions. Sessions are used to cache cryptographic information in a particular driver (or associated hardware), so initialization is not needed with every request. Consumers of cryptographic services pass a set of descriptors that instruct the framework (and the drivers registered with it) of the operations that should be applied on the data (more than one cryptographic operation can be requested). .Pp Keying operations are supported as well. Unlike the symmetric operators described above, these sessionless commands perform mathematical operations using input and output parameters. .Pp Since the consumers may not be associated with a process, drivers may not .Xr sleep 9 . The same holds for the framework. Thus, a callback mechanism is used to notify a consumer that a request has been completed (the callback is specified by the consumer on a per-request basis). The callback is invoked by the framework whether the request was successfully completed or not. An error indication is provided in the latter case. A specific error code, .Er EAGAIN , -is used to indicate that a session number has changed and that the -request may be re-submitted immediately with the new session number. +is used to indicate that a session handle has changed and that the +request may be re-submitted immediately with the new session. Errors are only returned to the invoking function if not enough information to call the callback is available (meaning, there was a fatal error in verifying the arguments). -For session initialization and teardown there is no callback mechanism used. +For session initialization and teardown no callback mechanism is used. .Pp The .Fn crypto_find_driver function may be called to return the specific id of the provided name. If the specified driver could not be found, the returned id is -1. .Pp The .Fn crypto_newsession routine is called by consumers of cryptographic services (such as the .Xr ipsec 4 stack) that wish to establish a new session with the framework. The second argument contains all the necessary information for the driver to establish the session. The third argument is either a specific driver id, or one or both of .Dv CRYPTOCAP_F_HARDWARE , to select hardware devices, or .Dv CRYPTOCAP_F_SOFTWARE , to select software devices. If both are specified, a hardware device will be returned before a software device will be. -On success, the value pointed to by the first argument will be the -Session IDentifier (SID). +On success, the value pointed to by the first argument will be the opaque +session handle. The various fields in the .Vt cryptoini structure are: .Bl -tag -width ".Va cri_next" .It Va cri_alg Contains an algorithm identifier. 
Currently supported algorithms are: .Pp .Bl -tag -width ".Dv CRYPTO_RIPEMD160_HMAC" -compact .It Dv CRYPTO_AES_128_NIST_GMAC .It Dv CRYPTO_AES_192_NIST_GMAC .It Dv CRYPTO_AES_256_NIST_GMAC .It Dv CRYPTO_AES_CBC .It Dv CRYPTO_AES_ICM .It Dv CRYPTO_AES_NIST_GCM_16 .It Dv CRYPTO_AES_NIST_GMAC .It Dv CRYPTO_AES_XTS .It Dv CRYPTO_ARC4 .It Dv CRYPTO_BLF_CBC .It Dv CRYPTO_CAMELLIA_CBC .It Dv CRYPTO_CAST_CBC .It Dv CRYPTO_DEFLATE_COMP .It Dv CRYPTO_DES_CBC .It Dv CRYPTO_3DES_CBC .It Dv CRYPTO_MD5 .It Dv CRYPTO_MD5_HMAC .It Dv CRYPTO_MD5_KPDK .It Dv CRYPTO_NULL_HMAC .It Dv CRYPTO_NULL_CBC .It Dv CRYPTO_RIPEMD160_HMAC .It Dv CRYPTO_SHA1 .It Dv CRYPTO_SHA1_HMAC .It Dv CRYPTO_SHA1_KPDK .It Dv CRYPTO_SHA2_256_HMAC .It Dv CRYPTO_SHA2_384_HMAC .It Dv CRYPTO_SHA2_512_HMAC .It Dv CRYPTO_SKIPJACK_CBC .El .It Va cri_klen Specifies the length of the key in bits, for variable-size key algorithms. .It Va cri_mlen Specifies how many bytes from the calculated hash should be copied back. 0 means entire hash. .It Va cri_key Contains the key to be used with the algorithm. .It Va cri_iv Contains an explicit initialization vector (IV), if it does not prefix the data. This field is ignored during initialization .Pq Nm crypto_newsession . If no IV is explicitly passed (see below on details), a random IV is used by the device driver processing the request. .It Va cri_next Contains a pointer to another .Vt cryptoini structure. Multiple such structures may be linked to establish multi-algorithm sessions .Xr ( ipsec 4 is an example consumer of such a feature). .El .Pp The .Vt cryptoini structure and its contents will not be modified by the framework (or the drivers used). -Subsequent requests for processing that use the -SID returned will avoid the cost of re-initializing the hardware (in -essence, SID acts as an index in the session cache of the driver). .Pp .Fn crypto_freesession -is called with the SID returned by +is called with the session handle returned by .Fn crypto_newsession -to disestablish the session. +to free the session. .Pp .Fn crypto_dispatch is called to process a request. The various fields in the .Vt cryptop structure are: .Bl -tag -width ".Va crp_callback" -.It Va crp_sid -Contains the SID. +.It Va crp_session +Contains the session handle. .It Va crp_ilen Indicates the total length in bytes of the buffer to be processed. .It Va crp_olen On return, contains the total length of the result. For symmetric crypto operations, this will be the same as the input length. This will be used if the framework needs to allocate a new buffer for the result (or for re-formatting the input). .It Va crp_callback This routine is invoked upon completion of the request, whether successful or not. It is invoked through the .Fn crypto_done routine. If the request was not successful, an error code is set in the .Va crp_etype field. It is the responsibility of the callback routine to set the appropriate .Xr spl 9 level. .It Va crp_etype Contains the error type, if any errors were encountered, or zero if the request was successfully processed. If the .Er EAGAIN -error code is returned, the SID has changed (and has been recorded in the -.Va crp_sid +error code is returned, the session handle has changed (and has been recorded +in the +.Va crp_session field). -The consumer should record the new SID and use it in all subsequent requests. +The consumer should record the new session handle and use it in all subsequent +requests. In this case, the request may be re-submitted immediately. 
This mechanism is used by the framework to perform session migration (move a session from one driver to another, because of availability, performance, or other considerations). .Pp Note that this field only makes sense when examined by the callback routine specified in .Va crp_callback . Errors are returned to the invoker of .Fn crypto_process only when enough information is not present to call the callback routine (i.e., if the pointer passed is .Dv NULL or if no callback routine was specified). .It Va crp_flags Is a bitmask of flags associated with this request. Currently defined flags are: .Bl -tag -width ".Dv CRYPTO_F_CBIFSYNC" .It Dv CRYPTO_F_IMBUF The buffer pointed to by .Va crp_buf is an mbuf chain. .It Dv CRYPTO_F_IOV The buffer pointed to by .Va crp_buf is an .Vt uio structure. .It Dv CRYPTO_F_BATCH Batch operation if possible. .It Dv CRYPTO_F_CBIMM Do callback immediately instead of doing it from a dedicated kernel thread. .It Dv CRYPTO_F_DONE Operation completed. .It Dv CRYPTO_F_CBIFSYNC Do callback immediately if operation is synchronous (that the driver specified the .Dv CRYPTOCAP_F_SYNC flag). .It Dv CRYPTO_F_ASYNC Try to do the crypto operation in a pool of workers if the operation is synchronous (that is, if the driver specified the .Dv CRYPTOCAP_F_SYNC flag). It aims to speed up processing by dispatching crypto operations on different processors. .It Dv CRYPTO_F_ASYNC_KEEPORDER Dispatch callbacks in the same order they are posted. Only relevant if the .Dv CRYPTO_F_ASYNC flag is set and if the operation is synchronous. .El .It Va crp_buf Points to the input buffer. On return (when the callback is invoked), it contains the result of the request. The input buffer may be an mbuf chain or a contiguous buffer, depending on .Va crp_flags . .It Va crp_opaque This is passed through the crypto framework untouched and is intended for the invoking application's use. .It Va crp_desc This is a linked list of descriptors. Each descriptor provides information about what type of cryptographic operation should be done on the input buffer. The various fields are: .Bl -tag -width ".Va crd_inject" .It Va crd_iv When the flag .Dv CRD_F_IV_EXPLICIT is set, this field contains the IV. .It Va crd_key When the .Dv CRD_F_KEY_EXPLICIT flag is set, the .Va crd_key points to a buffer with encryption or authentication key. .It Va crd_alg An algorithm to use. Must be the same as the one given at newsession time. .It Va crd_klen The .Va crd_key key length. .It Va crd_skip The offset in the input buffer where processing should start. .It Va crd_len How many bytes, after .Va crd_skip , should be processed. .It Va crd_inject The .Va crd_inject field specifies an offset in bytes from the beginning of the buffer. For encryption algorithms, this may be where the IV will be inserted when encrypting or where the IV may be found for decryption (subject to .Va crd_flags ) . For MAC algorithms, this is where the result of the keyed hash will be inserted. .It Va crd_flags The following flags are defined: .Bl -tag -width 3n .It Dv CRD_F_ENCRYPT For encryption algorithms, this bit is set when encryption is required (when not set, decryption is performed). .It Dv CRD_F_IV_PRESENT .\" This flag name has nothing to do w/ it's behavior, fix the name. For encryption, if this bit is not set the IV used to encrypt the packet will be written at the location pointed to by .Va crd_inject . The IV length is assumed to be equal to the blocksize of the encryption algorithm. For encryption, if this bit is set, nothing is done. 
For decryption, this flag has no meaning. Applications that do special .Dq "IV cooking" , such as the half-IV mode in .Xr ipsec 4 , can use this flag to indicate that the IV should not be written on the packet. This flag is typically used in conjunction with the .Dv CRD_F_IV_EXPLICIT flag. .It Dv CRD_F_IV_EXPLICIT This bit is set when the IV is explicitly provided by the consumer in the .Va crd_iv field. Otherwise, for encryption operations the IV is provided by the driver used to perform the operation, whereas for decryption operations the offset of the IV is provided by the .Va crd_inject field. This flag is typically used when the IV is calculated .Dq "on the fly" by the consumer, and does not precede the data (some .Xr ipsec 4 configurations, and the encrypted swap are two such examples). .It Dv CRD_F_KEY_EXPLICIT For encryption and authentication (MAC) algorithms, this bit is set when the key is explicitly provided by the consumer in the .Va crd_key field for the given operation. Otherwise, the key is taken at newsession time from the .Va cri_key field. As calculating the key schedule may take a while, it is recommended that often-used keys are given their own session. .It Dv CRD_F_COMP For compression algorithms, this bit is set when compression is required (when not set, decompression is performed). .El .It Va CRD_INI This .Vt cryptoini structure will not be modified by the framework or the device drivers. Since this information accompanies every cryptographic operation request, drivers may re-initialize state on-demand (typically an expensive operation). Furthermore, the cryptographic framework may re-route requests as a result of full queues or hardware failure, as described above. .It Va crd_next Points to the next descriptor. Linked operations are useful in protocols such as .Xr ipsec 4 , where multiple cryptographic transforms may be applied on the same block of data. .El .El .Pp .Fn crypto_getreq allocates a .Vt cryptop structure with a linked list of as many .Vt cryptodesc structures as were specified in the argument passed to it. .Pp .Fn crypto_freereq deallocates a .Vt cryptop structure and any .Vt cryptodesc structures linked to it. Note that it is the responsibility of the callback routine to do the necessary cleanups associated with the opaque field in the .Vt cryptop structure. .Pp .Fn crypto_kdispatch is called to perform a keying operation. The various fields in the .Vt cryptkop structure are: .Bl -tag -width ".Va krp_callback" .It Va krp_op Operation code, such as .Dv CRK_MOD_EXP . .It Va krp_status Return code. This .Va errno Ns -style variable indicates the lower level reason for the operation's failure, if any. .It Va krp_iparams Number of input parameters to the specified operation. Note that each operation has a (typically hardwired) number of such parameters. .It Va krp_oparams Number of output parameters from the specified operation. Note that each operation has a (typically hardwired) number of such parameters. .It Va krp_param An array of kernel memory blocks containing the parameters. .It Va krp_hid Identifier specifying which low-level driver is being used. .It Va krp_callback Callback called on completion of a keying operation.
.El .Sh DRIVER-SIDE API The .Fn crypto_get_driverid , +.Fn crypto_get_driver_session , .Fn crypto_register , .Fn crypto_kregister , .Fn crypto_unregister , .Fn crypto_unblock , and .Fn crypto_done routines are used by drivers that provide support for cryptographic primitives to register and unregister with the kernel crypto services framework. .Pp Drivers must first use the .Fn crypto_get_driverid function to acquire a driver identifier, specifying the .Fa flags as an argument. One of .Dv CRYPTOCAP_F_SOFTWARE or .Dv CRYPTOCAP_F_HARDWARE must be specified. The .Dv CRYPTOCAP_F_SYNC flag may also be specified, and should be specified if the driver does all of its operations synchronously. +Drivers must pass the size of their session struct as the second argument. +An appropriately sized region of memory will be allocated by the framework, zeroed, and +passed to the driver's +.Fn newsession +method. .Pp For each algorithm the driver supports, it must then call .Fn crypto_register . The first two arguments are the driver and algorithm identifiers. The next two arguments specify the largest possible operator length (in bits, important for public key operations) and flags for this algorithm. The last four arguments must be provided in the first call to .Fn crypto_register and are ignored in all subsequent calls. They are pointers to three driver-provided functions that the framework may call to establish new cryptographic context with the driver, free already established context, and ask for a request to be processed (encrypt, decrypt, etc.); and an opaque parameter to pass when calling each of these routines. .Pp .Fn crypto_unregister is called by drivers that wish to withdraw support for an algorithm. The two arguments are the driver and algorithm identifiers, respectively. Typically, drivers for PCMCIA crypto cards that are being ejected will invoke this routine for all algorithms supported by the card. .Fn crypto_unregister_all will unregister all algorithms registered by a driver and the driver will be disabled (no new sessions will be allocated on that driver, and any existing sessions will be migrated to other drivers). The same will be done if all algorithms associated with a driver are unregistered one by one. After a call to .Fn crypto_unregister_all there will be no threads in either the newsession or freesession function of the driver. .Pp -The calling convention for the three driver-supplied routines are: +The calling conventions for the driver-supplied routines are: .Pp .Bl -item -compact .It .Ft int -.Fn \*[lp]*newsession\*[rp] "device_t" "uint32_t *" "struct cryptoini *" ; +.Fn \*[lp]*newsession\*[rp] "device_t" "crypto_session_t" "struct cryptoini *" ; .It -.Ft int -.Fn \*[lp]*freesession\*[rp] "device_t" "uint64_t" ; +.Ft void +.Fn \*[lp]*freesession\*[rp] "device_t" "crypto_session_t" ; .It .Ft int .Fn \*[lp]*process\*[rp] "device_t" "struct cryptop *" "int" ; .It .Ft int .Fn \*[lp]*kprocess\*[rp] "device_t" "struct cryptkop *" "int" ; .El .Pp On invocation, the first argument to all routines is the .Fa device_t that was provided to .Fn crypto_get_driverid . The second argument to .Fn newsession -contains the driver identifier obtained via -.Fn crypto_get_driverid . -On successful return, it should contain a driver-specific session -identifier. +is the opaque session handle for the new session. The third argument is identical to that of .Fn crypto_newsession .
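.Pp
The following fragment sketches the driver side of this interface;
the mydrv names, the session layout, and the algorithm choice are
illustrative placeholders only, and
.Fn crypto_get_driver_session
is described below.
.Bd -literal
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/bus.h>
#include <opencrypto/cryptodev.h>

struct mydrv_session {
	int	algo;		/* algorithm chosen at newsession time */
	/* key schedules, HMAC state, hardware context, ... */
};

static int32_t mydrv_cid;

static int
mydrv_attach(device_t dev)
{

	/*
	 * The framework allocates and zeroes
	 * sizeof(struct mydrv_session) bytes for every session
	 * established on this driver.
	 */
	mydrv_cid = crypto_get_driverid(dev, sizeof(struct mydrv_session),
	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC);
	if (mydrv_cid < 0)
		return (ENOMEM);
	crypto_register(mydrv_cid, CRYPTO_AES_CBC, 0, 0);
	return (0);
}

static int
mydrv_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
{
	struct mydrv_session *ses;

	/* Session memory is pre-zeroed by the framework. */
	ses = crypto_get_driver_session(cses);
	ses->algo = cri->cri_alg;
	/* Expand keys, program the hardware, etc. */
	return (0);
}

static int
mydrv_process(device_t dev, struct cryptop *crp, int hint __unused)
{
	struct mydrv_session *ses;
	int error;

	ses = crypto_get_driver_session(crp->crp_session);
	error = 0;
	/*
	 * Perform the operations described by crp->crp_desc here.
	 * On temporary resource exhaustion, return (ERESTART) without
	 * calling crypto_done(), and call crypto_unblock() once the
	 * driver can accept requests again.
	 */
	crp->crp_etype = error;
	crypto_done(crp);
	return (error);
}
.Ed
.Pp
A driver attaches the
.Fn newsession
and
.Fn process
entry points through its
.Dv cryptodev_newsession
and
.Dv cryptodev_process
device methods.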
.Pp +Drivers obtain a pointer to their session memory by invoking +.Fn crypto_get_driver_session +on the opaque +.Vt crypto_session_t +handle. +.Pp The .Fn freesession -routine takes as arguments the opaque data value and the SID -(which is the concatenation of the -driver identifier and the driver-specific session identifier). +routine takes as arguments the opaque data value and the session handle. It should clear any context associated with the session (clear hardware registers, memory, etc.). +If no resources need to be released other than the contents of session memory, +the method is optional. +The +.Nm +framework will zero and release the allocated session memory (after running the +.Fn freesession +method, if one exists). .Pp The .Fn process routine is invoked with a request to perform crypto processing. This routine must not block or sleep, but should queue the request and return immediately or process the request to completion. In case of an unrecoverable error, the error indication must be placed in the .Va crp_etype field of the .Vt cryptop structure. When the request is completed, or an error is detected, the .Fn process routine must invoke .Fn crypto_done . Session migration may be performed, as mentioned previously. .Pp In case of temporary resource exhaustion, the .Fn process routine may return .Er ERESTART in which case the crypto services will requeue the request, mark the driver as .Dq blocked , and stop submitting requests for processing. The driver is then responsible for notifying the crypto services when it is again able to process requests through the .Fn crypto_unblock routine. This simple flow control mechanism should only be used for short-lived resource exhaustion as it causes operations to be queued in the crypto layer. Doing so is preferable to returning an error in such cases, as returning an error can cause network protocols to degrade performance by treating the failure much like a lost packet. .Pp The .Fn kprocess routine is invoked with a request to perform crypto key processing. This routine must not block, but should queue the request and return immediately. Upon processing the request, the callback routine should be invoked. In case of an unrecoverable error, the error indication must be placed in the .Va krp_status field of the .Vt cryptkop structure. When the request is completed, or an error is detected, the .Fn kprocess routine should invoke .Fn crypto_kdone . .Sh RETURN VALUES .Fn crypto_register , .Fn crypto_kregister , .Fn crypto_unregister , .Fn crypto_newsession , .Fn crypto_freesession , and .Fn crypto_unblock return 0 on success, or an error code on failure. .Fn crypto_get_driverid returns a non-negative value on success, and \-1 on failure. .Fn crypto_getreq returns a pointer to a .Vt cryptop structure, or .Dv NULL on failure. .Fn crypto_dispatch returns .Er EINVAL if its argument or the callback function was .Dv NULL , and 0 otherwise. The callback is provided with an error code in case of failure, in the .Va crp_etype field. .Sh FILES .Bl -tag -width ".Pa sys/opencrypto/crypto.c" .It Pa sys/opencrypto/crypto.c most of the framework code .El .Sh SEE ALSO .Xr crypto 4 , .Xr ipsec 4 , .Xr crypto 7 , .Xr malloc 9 , .Xr sleep 9 .Sh HISTORY The cryptographic framework first appeared in .Ox 2.7 and was written by .An Angelos D. Keromytis Aq Mt angelos@openbsd.org . .Sh BUGS The framework currently assumes that all the algorithms in a .Fn crypto_newsession operation must be supported by the same driver. If that is not the case, session initialization will fail.
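.Pp
For example, a session that applies both encryption and authentication is
requested with a pair of linked
.Vt cryptoini
structures, and a single driver must support both algorithms; the key buffers
and key lengths below are illustrative placeholders only.
.Bd -literal
uint8_t enc_key[16], auth_key[20];	/* placeholder key material */
struct cryptoini crie, cria;
crypto_session_t sid;
int error;

memset(&crie, 0, sizeof(crie));
crie.cri_alg = CRYPTO_AES_CBC;
crie.cri_klen = 128;			/* key length in bits */
crie.cri_key = (caddr_t)enc_key;

memset(&cria, 0, sizeof(cria));
cria.cri_alg = CRYPTO_SHA1_HMAC;
cria.cri_klen = 160;			/* key length in bits */
cria.cri_key = (caddr_t)auth_key;

crie.cri_next = &cria;			/* multi-algorithm session */

/* Accept either a hardware or a software driver. */
error = crypto_newsession(&sid, &crie,
    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
.Ed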
.Pp The framework also needs a mechanism for determining which driver is best for a specific set of algorithms associated with a session. Some type of benchmarking is in order here. .Pp Multiple instances of the same algorithm in the same session are not supported. -Note that 3DES is considered one algorithm (and not three -instances of DES). -Thus, 3DES and DES could be mixed in the same request. Index: head/sys/crypto/aesni/aesni.c =================================================================== --- head/sys/crypto/aesni/aesni.c (revision 336438) +++ head/sys/crypto/aesni/aesni.c (revision 336439) @@ -1,979 +1,876 @@ /*- * Copyright (c) 2005-2008 Pawel Jakub Dawidek * Copyright (c) 2010 Konstantin Belousov * Copyright (c) 2014 The FreeBSD Foundation * Copyright (c) 2017 Conrad Meyer * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) #include #elif defined(__amd64__) #include #endif static struct mtx_padalign *ctx_mtx; static struct fpu_kern_ctx **ctx_fpu; struct aesni_softc { - int dieing; int32_t cid; - uint32_t sid; bool has_aes; bool has_sha; - TAILQ_HEAD(aesni_sessions_head, aesni_session) sessions; - struct rwlock lock; }; #define ACQUIRE_CTX(i, ctx) \ do { \ (i) = PCPU_GET(cpuid); \ mtx_lock(&ctx_mtx[(i)]); \ (ctx) = ctx_fpu[(i)]; \ } while (0) #define RELEASE_CTX(i, ctx) \ do { \ mtx_unlock(&ctx_mtx[(i)]); \ (i) = -1; \ (ctx) = NULL; \ } while (0) -static int aesni_newsession(device_t, uint32_t *sidp, struct cryptoini *cri); -static int aesni_freesession(device_t, uint64_t tid); -static void aesni_freesession_locked(struct aesni_softc *sc, - struct aesni_session *ses); +static int aesni_newsession(device_t, crypto_session_t cses, + struct cryptoini *cri); static int aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini, struct cryptoini *authini); static int aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd, struct cryptodesc *authcrd, struct cryptop *crp); static int aesni_cipher_crypt(struct aesni_session *ses, struct cryptodesc *enccrd, struct cryptodesc *authcrd, struct cryptop *crp); static int aesni_cipher_mac(struct aesni_session *ses, struct cryptodesc *crd, struct cryptop *crp); MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data"); static void aesni_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "aesni", -1) == NULL && BUS_ADD_CHILD(parent, 10, "aesni", -1) == 0) panic("aesni: could not attach"); } static void detect_cpu_features(bool *has_aes, bool *has_sha) { *has_aes = ((cpu_feature2 & CPUID2_AESNI) != 0 && (cpu_feature2 & CPUID2_SSE41) != 0); *has_sha = ((cpu_stdext_feature & CPUID_STDEXT_SHA) != 0 && (cpu_feature2 & CPUID2_SSSE3) != 0); } static int aesni_probe(device_t dev) { bool has_aes, has_sha; detect_cpu_features(&has_aes, &has_sha); if (!has_aes && !has_sha) { device_printf(dev, "No AES or SHA support.\n"); return (EINVAL); } else if (has_aes && has_sha) device_set_desc(dev, "AES-CBC,AES-XTS,AES-GCM,AES-ICM,SHA1,SHA256"); else if (has_aes) device_set_desc(dev, "AES-CBC,AES-XTS,AES-GCM,AES-ICM"); else device_set_desc(dev, "SHA1,SHA256"); return (0); } static void aesni_cleanctx(void) { int i; /* XXX - no way to return driverid */ CPU_FOREACH(i) { if (ctx_fpu[i] != NULL) { mtx_destroy(&ctx_mtx[i]); fpu_kern_free_ctx(ctx_fpu[i]); } ctx_fpu[i] = NULL; } free(ctx_mtx, M_AESNI); ctx_mtx = NULL; free(ctx_fpu, M_AESNI); ctx_fpu = NULL; } static int aesni_attach(device_t dev) { struct aesni_softc *sc; int i; sc = device_get_softc(dev); - sc->dieing = 0; - TAILQ_INIT(&sc->sessions); - sc->sid = 1; - sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE | - CRYPTOCAP_F_SYNC); + sc->cid = crypto_get_driverid(dev, sizeof(struct aesni_session), + CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC); if (sc->cid < 0) { device_printf(dev, "Could not get crypto driver id.\n"); return (ENOMEM); } ctx_mtx = malloc(sizeof *ctx_mtx * (mp_maxid + 1), M_AESNI, M_WAITOK|M_ZERO); ctx_fpu = malloc(sizeof *ctx_fpu * (mp_maxid + 1), M_AESNI, M_WAITOK|M_ZERO); CPU_FOREACH(i) { ctx_fpu[i] = fpu_kern_alloc_ctx(0); mtx_init(&ctx_mtx[i], "anifpumtx", NULL, 
MTX_DEF|MTX_NEW); } - rw_init(&sc->lock, "aesni_lock"); - detect_cpu_features(&sc->has_aes, &sc->has_sha); if (sc->has_aes) { crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0); crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0); crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0); crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0); crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0); crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0); crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0); } if (sc->has_sha) { crypto_register(sc->cid, CRYPTO_SHA1, 0, 0); crypto_register(sc->cid, CRYPTO_SHA1_HMAC, 0, 0); crypto_register(sc->cid, CRYPTO_SHA2_256_HMAC, 0, 0); } return (0); } static int aesni_detach(device_t dev) { struct aesni_softc *sc; - struct aesni_session *ses; sc = device_get_softc(dev); - rw_wlock(&sc->lock); - TAILQ_FOREACH(ses, &sc->sessions, next) { - if (ses->used) { - rw_wunlock(&sc->lock); - device_printf(dev, - "Cannot detach, sessions still active.\n"); - return (EBUSY); - } - } - sc->dieing = 1; - while ((ses = TAILQ_FIRST(&sc->sessions)) != NULL) { - TAILQ_REMOVE(&sc->sessions, ses, next); - free(ses, M_AESNI); - } - rw_wunlock(&sc->lock); crypto_unregister_all(sc->cid); - rw_destroy(&sc->lock); - aesni_cleanctx(); return (0); } static int -aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri) +aesni_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri) { struct aesni_softc *sc; struct aesni_session *ses; struct cryptoini *encini, *authini; bool gcm_hash, gcm; int error; - if (sidp == NULL || cri == NULL) { - CRYPTDEB("no sidp or cri"); + KASSERT(cses != NULL, ("EDOOFUS")); + if (cri == NULL) { + CRYPTDEB("no cri"); return (EINVAL); } sc = device_get_softc(dev); - if (sc->dieing) - return (EINVAL); - ses = NULL; + ses = crypto_get_driver_session(cses); + authini = NULL; encini = NULL; gcm = false; gcm_hash = false; for (; cri != NULL; cri = cri->cri_next) { switch (cri->cri_alg) { case CRYPTO_AES_NIST_GCM_16: gcm = true; /* FALLTHROUGH */ case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: case CRYPTO_AES_XTS: if (!sc->has_aes) goto unhandled; if (encini != NULL) { CRYPTDEB("encini already set"); return (EINVAL); } encini = cri; break; case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: /* * nothing to do here, maybe in the future cache some * values for GHASH */ gcm_hash = true; break; case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: if (!sc->has_sha) goto unhandled; if (authini != NULL) { CRYPTDEB("authini already set"); return (EINVAL); } authini = cri; break; default: unhandled: CRYPTDEB("unhandled algorithm"); return (EINVAL); } } if (encini == NULL && authini == NULL) { CRYPTDEB("no cipher"); return (EINVAL); } /* * GMAC algorithms are only supported with simultaneous GCM. Likewise * GCM is not supported without GMAC. */ if (gcm_hash != gcm) return (EINVAL); - rw_wlock(&sc->lock); - if (sc->dieing) { - rw_wunlock(&sc->lock); - return (EINVAL); - } - /* - * Free sessions are inserted at the head of the list. So if the first - * session is used, none are free and we must allocate a new one. 
- */ - ses = TAILQ_FIRST(&sc->sessions); - if (ses == NULL || ses->used) { - ses = malloc(sizeof(*ses), M_AESNI, M_NOWAIT | M_ZERO); - if (ses == NULL) { - rw_wunlock(&sc->lock); - return (ENOMEM); - } - ses->id = sc->sid++; - } else { - TAILQ_REMOVE(&sc->sessions, ses, next); - } - ses->used = 1; - TAILQ_INSERT_TAIL(&sc->sessions, ses, next); - rw_wunlock(&sc->lock); - if (encini != NULL) ses->algo = encini->cri_alg; if (authini != NULL) ses->auth_algo = authini->cri_alg; error = aesni_cipher_setup(ses, encini, authini); if (error != 0) { CRYPTDEB("setup failed"); - rw_wlock(&sc->lock); - aesni_freesession_locked(sc, ses); - rw_wunlock(&sc->lock); return (error); } - *sidp = ses->id; return (0); } -static void -aesni_freesession_locked(struct aesni_softc *sc, struct aesni_session *ses) -{ - uint32_t sid; - - rw_assert(&sc->lock, RA_WLOCKED); - - sid = ses->id; - TAILQ_REMOVE(&sc->sessions, ses, next); - explicit_bzero(ses, sizeof(*ses)); - ses->id = sid; - TAILQ_INSERT_HEAD(&sc->sessions, ses, next); -} - static int -aesni_freesession(device_t dev, uint64_t tid) -{ - struct aesni_softc *sc; - struct aesni_session *ses; - uint32_t sid; - - sc = device_get_softc(dev); - sid = ((uint32_t)tid) & 0xffffffff; - rw_wlock(&sc->lock); - TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) { - if (ses->id == sid) - break; - } - if (ses == NULL) { - rw_wunlock(&sc->lock); - return (EINVAL); - } - aesni_freesession_locked(sc, ses); - rw_wunlock(&sc->lock); - return (0); -} - -static int aesni_process(device_t dev, struct cryptop *crp, int hint __unused) { struct aesni_softc *sc; struct aesni_session *ses; struct cryptodesc *crd, *enccrd, *authcrd; int error, needauth; sc = device_get_softc(dev); ses = NULL; error = 0; enccrd = NULL; authcrd = NULL; needauth = 0; /* Sanity check. 
*/ if (crp == NULL) return (EINVAL); - if (crp->crp_callback == NULL || crp->crp_desc == NULL) { + if (crp->crp_callback == NULL || crp->crp_desc == NULL || + crp->crp_session == NULL) { error = EINVAL; goto out; } for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) { switch (crd->crd_alg) { case CRYPTO_AES_NIST_GCM_16: needauth = 1; /* FALLTHROUGH */ case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: case CRYPTO_AES_XTS: if (enccrd != NULL) { error = EINVAL; goto out; } enccrd = crd; break; case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: if (authcrd != NULL) { error = EINVAL; goto out; } authcrd = crd; break; default: error = EINVAL; goto out; } } if ((enccrd == NULL && authcrd == NULL) || (needauth && authcrd == NULL)) { error = EINVAL; goto out; } /* CBC & XTS can only handle full blocks for now */ if (enccrd != NULL && (enccrd->crd_alg == CRYPTO_AES_CBC || enccrd->crd_alg == CRYPTO_AES_XTS) && (enccrd->crd_len % AES_BLOCK_LEN) != 0) { error = EINVAL; goto out; } - rw_rlock(&sc->lock); - TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) { - if (ses->id == (crp->crp_sid & 0xffffffff)) - break; - } - rw_runlock(&sc->lock); - if (ses == NULL) { - error = EINVAL; - goto out; - } + ses = crypto_get_driver_session(crp->crp_session); + KASSERT(ses != NULL, ("EDOOFUS")); error = aesni_cipher_process(ses, enccrd, authcrd, crp); if (error != 0) goto out; out: crp->crp_etype = error; crypto_done(crp); return (error); } static uint8_t * aesni_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp, bool *allocated) { struct mbuf *m; struct uio *uio; struct iovec *iov; uint8_t *addr; if (crp->crp_flags & CRYPTO_F_IMBUF) { m = (struct mbuf *)crp->crp_buf; if (m->m_next != NULL) goto alloc; addr = mtod(m, uint8_t *); } else if (crp->crp_flags & CRYPTO_F_IOV) { uio = (struct uio *)crp->crp_buf; if (uio->uio_iovcnt != 1) goto alloc; iov = uio->uio_iov; addr = (uint8_t *)iov->iov_base; } else addr = (uint8_t *)crp->crp_buf; *allocated = false; addr += enccrd->crd_skip; return (addr); alloc: addr = malloc(enccrd->crd_len, M_AESNI, M_NOWAIT); if (addr != NULL) { *allocated = true; crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip, enccrd->crd_len, addr); } else *allocated = false; return (addr); } static device_method_t aesni_methods[] = { DEVMETHOD(device_identify, aesni_identify), DEVMETHOD(device_probe, aesni_probe), DEVMETHOD(device_attach, aesni_attach), DEVMETHOD(device_detach, aesni_detach), DEVMETHOD(cryptodev_newsession, aesni_newsession), - DEVMETHOD(cryptodev_freesession, aesni_freesession), DEVMETHOD(cryptodev_process, aesni_process), DEVMETHOD_END }; static driver_t aesni_driver = { "aesni", aesni_methods, sizeof(struct aesni_softc), }; static devclass_t aesni_devclass; DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, 0, 0); MODULE_VERSION(aesni, 1); MODULE_DEPEND(aesni, crypto, 1, 1, 1); static int aesni_authprepare(struct aesni_session *ses, int klen, const void *cri_key) { int keylen; if (klen % 8 != 0) return (EINVAL); keylen = klen / 8; if (keylen > sizeof(ses->hmac_key)) return (EINVAL); if (ses->auth_algo == CRYPTO_SHA1 && keylen > 0) return (EINVAL); memcpy(ses->hmac_key, cri_key, keylen); return (0); } static int aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini, struct cryptoini *authini) { struct fpu_kern_ctx *ctx; int kt, ctxidx, error; switch (ses->auth_algo) { case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: case 
CRYPTO_SHA2_256_HMAC: error = aesni_authprepare(ses, authini->cri_klen, authini->cri_key); if (error != 0) return (error); ses->mlen = authini->cri_mlen; } kt = is_fpu_kern_thread(0) || (encini == NULL); if (!kt) { ACQUIRE_CTX(ctxidx, ctx); fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); } error = 0; if (encini != NULL) error = aesni_cipher_setup_common(ses, encini->cri_key, encini->cri_klen); if (!kt) { fpu_kern_leave(curthread, ctx); RELEASE_CTX(ctxidx, ctx); } return (error); } static int intel_sha1_update(void *vctx, const void *vdata, u_int datalen) { struct sha1_ctxt *ctx = vctx; const char *data = vdata; size_t gaplen; size_t gapstart; size_t off; size_t copysiz; u_int blocks; off = 0; /* Do any aligned blocks without redundant copying. */ if (datalen >= 64 && ctx->count % 64 == 0) { blocks = datalen / 64; ctx->c.b64[0] += blocks * 64 * 8; intel_sha1_step(ctx->h.b32, data + off, blocks); off += blocks * 64; } while (off < datalen) { gapstart = ctx->count % 64; gaplen = 64 - gapstart; copysiz = (gaplen < datalen - off) ? gaplen : datalen - off; bcopy(&data[off], &ctx->m.b8[gapstart], copysiz); ctx->count += copysiz; ctx->count %= 64; ctx->c.b64[0] += copysiz * 8; if (ctx->count % 64 == 0) intel_sha1_step(ctx->h.b32, (void *)ctx->m.b8, 1); off += copysiz; } return (0); } static void SHA1_Finalize_fn(void *digest, void *ctx) { sha1_result(ctx, digest); } static int intel_sha256_update(void *vctx, const void *vdata, u_int len) { SHA256_CTX *ctx = vctx; uint64_t bitlen; uint32_t r; u_int blocks; const unsigned char *src = vdata; /* Number of bytes left in the buffer from previous updates */ r = (ctx->count >> 3) & 0x3f; /* Convert the length into a number of bits */ bitlen = len << 3; /* Update number of bits */ ctx->count += bitlen; /* Handle the case where we don't need to perform any transforms */ if (len < 64 - r) { memcpy(&ctx->buf[r], src, len); return (0); } /* Finish the current block */ memcpy(&ctx->buf[r], src, 64 - r); intel_sha256_step(ctx->state, ctx->buf, 1); src += 64 - r; len -= 64 - r; /* Perform complete blocks */ if (len >= 64) { blocks = len / 64; intel_sha256_step(ctx->state, src, blocks); src += blocks * 64; len -= blocks * 64; } /* Copy left over data into buffer */ memcpy(ctx->buf, src, len); return (0); } static void SHA256_Finalize_fn(void *digest, void *ctx) { SHA256_Final(digest, ctx); } /* * Compute the HASH( (key ^ xorbyte) || buf ) */ static void hmac_internal(void *ctx, uint32_t *res, int (*update)(void *, const void *, u_int), void (*finalize)(void *, void *), uint8_t *key, uint8_t xorbyte, const void *buf, size_t off, size_t buflen, int crpflags) { size_t i; for (i = 0; i < 64; i++) key[i] ^= xorbyte; update(ctx, key, 64); for (i = 0; i < 64; i++) key[i] ^= xorbyte; crypto_apply(crpflags, __DECONST(void *, buf), off, buflen, __DECONST(int (*)(void *, void *, u_int), update), ctx); finalize(res, ctx); } static int aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd, struct cryptodesc *authcrd, struct cryptop *crp) { struct fpu_kern_ctx *ctx; int error, ctxidx; bool kt; if (enccrd != NULL) { if ((enccrd->crd_alg == CRYPTO_AES_ICM || enccrd->crd_alg == CRYPTO_AES_NIST_GCM_16) && (enccrd->crd_flags & CRD_F_IV_EXPLICIT) == 0) return (EINVAL); } ctx = NULL; ctxidx = 0; error = 0; kt = is_fpu_kern_thread(0); if (!kt) { ACQUIRE_CTX(ctxidx, ctx); fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); } /* Do work */ if (enccrd != NULL && authcrd != NULL) { /* Perform the first operation */ if (crp->crp_desc == 
enccrd) error = aesni_cipher_crypt(ses, enccrd, authcrd, crp); else error = aesni_cipher_mac(ses, authcrd, crp); if (error != 0) goto out; /* Perform the second operation */ if (crp->crp_desc == enccrd) error = aesni_cipher_mac(ses, authcrd, crp); else error = aesni_cipher_crypt(ses, enccrd, authcrd, crp); } else if (enccrd != NULL) error = aesni_cipher_crypt(ses, enccrd, authcrd, crp); else error = aesni_cipher_mac(ses, authcrd, crp); if (error != 0) goto out; out: if (!kt) { fpu_kern_leave(curthread, ctx); RELEASE_CTX(ctxidx, ctx); } return (error); } static int aesni_cipher_crypt(struct aesni_session *ses, struct cryptodesc *enccrd, struct cryptodesc *authcrd, struct cryptop *crp) { uint8_t iv[AES_BLOCK_LEN], tag[GMAC_DIGEST_LEN], *buf, *authbuf; int error, ivlen; bool encflag, allocated, authallocated; KASSERT(ses->algo != CRYPTO_AES_NIST_GCM_16 || authcrd != NULL, ("AES_NIST_GCM_16 must include MAC descriptor")); ivlen = 0; authbuf = NULL; buf = aesni_cipher_alloc(enccrd, crp, &allocated); if (buf == NULL) return (ENOMEM); authallocated = false; if (ses->algo == CRYPTO_AES_NIST_GCM_16) { authbuf = aesni_cipher_alloc(authcrd, crp, &authallocated); if (authbuf == NULL) { error = ENOMEM; goto out; } } error = 0; encflag = (enccrd->crd_flags & CRD_F_ENCRYPT) == CRD_F_ENCRYPT; if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) { error = aesni_cipher_setup_common(ses, enccrd->crd_key, enccrd->crd_klen); if (error != 0) goto out; } switch (enccrd->crd_alg) { case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: ivlen = AES_BLOCK_LEN; break; case CRYPTO_AES_XTS: ivlen = 8; break; case CRYPTO_AES_NIST_GCM_16: ivlen = 12; /* should support arbitarily larger */ break; } /* Setup iv */ if (encflag) { if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0) bcopy(enccrd->crd_iv, iv, ivlen); else arc4rand(iv, ivlen, 0); if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivlen, iv); } else { if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0) bcopy(enccrd->crd_iv, iv, ivlen); else crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivlen, iv); } switch (ses->algo) { case CRYPTO_AES_CBC: if (encflag) aesni_encrypt_cbc(ses->rounds, ses->enc_schedule, enccrd->crd_len, buf, buf, iv); else aesni_decrypt_cbc(ses->rounds, ses->dec_schedule, enccrd->crd_len, buf, iv); break; case CRYPTO_AES_ICM: /* encryption & decryption are the same */ aesni_encrypt_icm(ses->rounds, ses->enc_schedule, enccrd->crd_len, buf, buf, iv); break; case CRYPTO_AES_XTS: if (encflag) aesni_encrypt_xts(ses->rounds, ses->enc_schedule, ses->xts_schedule, enccrd->crd_len, buf, buf, iv); else aesni_decrypt_xts(ses->rounds, ses->dec_schedule, ses->xts_schedule, enccrd->crd_len, buf, buf, iv); break; case CRYPTO_AES_NIST_GCM_16: if (!encflag) crypto_copydata(crp->crp_flags, crp->crp_buf, authcrd->crd_inject, GMAC_DIGEST_LEN, tag); else bzero(tag, sizeof tag); if (encflag) { AES_GCM_encrypt(buf, buf, authbuf, iv, tag, enccrd->crd_len, authcrd->crd_len, ivlen, ses->enc_schedule, ses->rounds); if (authcrd != NULL) crypto_copyback(crp->crp_flags, crp->crp_buf, authcrd->crd_inject, GMAC_DIGEST_LEN, tag); } else { if (!AES_GCM_decrypt(buf, buf, authbuf, iv, tag, enccrd->crd_len, authcrd->crd_len, ivlen, ses->enc_schedule, ses->rounds)) error = EBADMSG; } break; } if (allocated) crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip, enccrd->crd_len, buf); out: if (allocated) { explicit_bzero(buf, enccrd->crd_len); free(buf, M_AESNI); } if (authallocated) { explicit_bzero(authbuf, 
authcrd->crd_len); free(authbuf, M_AESNI); } return (error); } static int aesni_cipher_mac(struct aesni_session *ses, struct cryptodesc *crd, struct cryptop *crp) { union { struct SHA256Context sha2 __aligned(16); struct sha1_ctxt sha1 __aligned(16); } sctx; uint32_t res[SHA2_256_HASH_LEN / sizeof(uint32_t)]; int hashlen, error; if ((crd->crd_flags & ~CRD_F_KEY_EXPLICIT) != 0) { CRYPTDEB("%s: Unsupported MAC flags: 0x%x", __func__, (crd->crd_flags & ~CRD_F_KEY_EXPLICIT)); return (EINVAL); } if ((crd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) { error = aesni_authprepare(ses, crd->crd_klen, crd->crd_key); if (error != 0) return (error); } switch (ses->auth_algo) { case CRYPTO_SHA1_HMAC: hashlen = SHA1_HASH_LEN; /* Inner hash: (K ^ IPAD) || data */ sha1_init(&sctx.sha1); hmac_internal(&sctx.sha1, res, intel_sha1_update, SHA1_Finalize_fn, ses->hmac_key, 0x36, crp->crp_buf, crd->crd_skip, crd->crd_len, crp->crp_flags); /* Outer hash: (K ^ OPAD) || inner hash */ sha1_init(&sctx.sha1); hmac_internal(&sctx.sha1, res, intel_sha1_update, SHA1_Finalize_fn, ses->hmac_key, 0x5C, res, 0, hashlen, 0); break; case CRYPTO_SHA1: hashlen = SHA1_HASH_LEN; sha1_init(&sctx.sha1); crypto_apply(crp->crp_flags, crp->crp_buf, crd->crd_skip, crd->crd_len, __DECONST(int (*)(void *, void *, u_int), intel_sha1_update), &sctx.sha1); sha1_result(&sctx.sha1, (void *)res); break; case CRYPTO_SHA2_256_HMAC: hashlen = SHA2_256_HASH_LEN; /* Inner hash: (K ^ IPAD) || data */ SHA256_Init(&sctx.sha2); hmac_internal(&sctx.sha2, res, intel_sha256_update, SHA256_Finalize_fn, ses->hmac_key, 0x36, crp->crp_buf, crd->crd_skip, crd->crd_len, crp->crp_flags); /* Outer hash: (K ^ OPAD) || inner hash */ SHA256_Init(&sctx.sha2); hmac_internal(&sctx.sha2, res, intel_sha256_update, SHA256_Finalize_fn, ses->hmac_key, 0x5C, res, 0, hashlen, 0); break; default: /* * AES-GMAC authentication is verified while processing the * enccrd */ return (0); } if (ses->mlen != 0 && ses->mlen < hashlen) hashlen = ses->mlen; crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, hashlen, (void *)res); return (0); } Index: head/sys/crypto/aesni/aesni.h =================================================================== --- head/sys/crypto/aesni/aesni.h (revision 336438) +++ head/sys/crypto/aesni/aesni.h (revision 336439) @@ -1,119 +1,117 @@ /*- * Copyright (c) 2010 Konstantin Belousov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _AESNI_H_ #define _AESNI_H_ #include #include #include #include #if defined(__amd64__) || defined(__i386__) #include #include #include #include #endif #if defined(__i386__) #include #elif defined(__amd64__) #include #endif #define AES128_ROUNDS 10 #define AES192_ROUNDS 12 #define AES256_ROUNDS 14 #define AES_SCHED_LEN ((AES256_ROUNDS + 1) * AES_BLOCK_LEN) struct aesni_session { uint8_t enc_schedule[AES_SCHED_LEN] __aligned(16); uint8_t dec_schedule[AES_SCHED_LEN] __aligned(16); uint8_t xts_schedule[AES_SCHED_LEN] __aligned(16); /* Same as the SHA256 Blocksize. */ uint8_t hmac_key[SHA1_BLOCK_LEN] __aligned(16); int algo; int rounds; /* uint8_t *ses_ictx; */ /* uint8_t *ses_octx; */ /* int ses_mlen; */ int used; int auth_algo; int mlen; - uint32_t id; - TAILQ_ENTRY(aesni_session) next; }; /* * Internal functions, implemented in assembler. */ void aesni_set_enckey(const uint8_t *userkey, uint8_t *encrypt_schedule /*__aligned(16)*/, int number_of_rounds); void aesni_set_deckey(const uint8_t *encrypt_schedule /*__aligned(16)*/, uint8_t *decrypt_schedule /*__aligned(16)*/, int number_of_rounds); /* * Slightly more public interfaces. */ void aesni_encrypt_cbc(int rounds, const void *key_schedule /*__aligned(16)*/, size_t len, const uint8_t *from, uint8_t *to, const uint8_t iv[__min_size(AES_BLOCK_LEN)]); void aesni_decrypt_cbc(int rounds, const void *key_schedule /*__aligned(16)*/, size_t len, uint8_t *buf, const uint8_t iv[__min_size(AES_BLOCK_LEN)]); void aesni_encrypt_ecb(int rounds, const void *key_schedule /*__aligned(16)*/, size_t len, const uint8_t *from, uint8_t *to); void aesni_decrypt_ecb(int rounds, const void *key_schedule /*__aligned(16)*/, size_t len, const uint8_t *from, uint8_t *to); void aesni_encrypt_icm(int rounds, const void *key_schedule /*__aligned(16)*/, size_t len, const uint8_t *from, uint8_t *to, const uint8_t iv[__min_size(AES_BLOCK_LEN)]); void aesni_encrypt_xts(int rounds, const void *data_schedule /*__aligned(16)*/, const void *tweak_schedule /*__aligned(16)*/, size_t len, const uint8_t *from, uint8_t *to, const uint8_t iv[__min_size(AES_BLOCK_LEN)]); void aesni_decrypt_xts(int rounds, const void *data_schedule /*__aligned(16)*/, const void *tweak_schedule /*__aligned(16)*/, size_t len, const uint8_t *from, uint8_t *to, const uint8_t iv[__min_size(AES_BLOCK_LEN)]); /* GCM & GHASH functions */ void AES_GCM_encrypt(const unsigned char *in, unsigned char *out, const unsigned char *addt, const unsigned char *ivec, unsigned char *tag, uint32_t nbytes, uint32_t abytes, int ibytes, const unsigned char *key, int nr); int AES_GCM_decrypt(const unsigned char *in, unsigned char *out, const unsigned char *addt, const unsigned char *ivec, const unsigned char *tag, uint32_t nbytes, uint32_t abytes, int ibytes, const unsigned char *key, int nr); int aesni_cipher_setup_common(struct aesni_session *ses, const uint8_t *key, int keylen); #endif /* _AESNI_H_ */ Index: head/sys/crypto/armv8/armv8_crypto.c 
=================================================================== --- head/sys/crypto/armv8/armv8_crypto.c (revision 336438) +++ head/sys/crypto/armv8/armv8_crypto.c (revision 336439) @@ -1,560 +1,468 @@ /*- * Copyright (c) 2005-2008 Pawel Jakub Dawidek * Copyright (c) 2010 Konstantin Belousov * Copyright (c) 2014,2016 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This is based on the aesni code. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct armv8_crypto_softc { int dieing; int32_t cid; - uint32_t sid; - TAILQ_HEAD(armv8_crypto_sessions_head, armv8_crypto_session) sessions; struct rwlock lock; }; static struct mtx *ctx_mtx; static struct fpu_kern_ctx **ctx_vfp; #define AQUIRE_CTX(i, ctx) \ do { \ (i) = PCPU_GET(cpuid); \ mtx_lock(&ctx_mtx[(i)]); \ (ctx) = ctx_vfp[(i)]; \ } while (0) #define RELEASE_CTX(i, ctx) \ do { \ mtx_unlock(&ctx_mtx[(i)]); \ (i) = -1; \ (ctx) = NULL; \ } while (0) -static void armv8_crypto_freesession_locked(struct armv8_crypto_softc *, - struct armv8_crypto_session *); static int armv8_crypto_cipher_process(struct armv8_crypto_session *, struct cryptodesc *, struct cryptop *); MALLOC_DEFINE(M_ARMV8_CRYPTO, "armv8_crypto", "ARMv8 Crypto Data"); static void armv8_crypto_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "armv8crypto", -1) == NULL && BUS_ADD_CHILD(parent, 10, "armv8crypto", -1) == 0) panic("ARMv8 crypto: could not attach"); } static int armv8_crypto_probe(device_t dev) { uint64_t reg; int ret = ENXIO; reg = READ_SPECIALREG(id_aa64isar0_el1); switch (ID_AA64ISAR0_AES(reg)) { case ID_AA64ISAR0_AES_BASE: case ID_AA64ISAR0_AES_PMULL: ret = 0; break; } device_set_desc_copy(dev, "AES-CBC"); /* TODO: Check more fields as we support more features */ return (ret); } static int armv8_crypto_attach(device_t dev) { struct armv8_crypto_softc *sc; int i; sc = device_get_softc(dev); - TAILQ_INIT(&sc->sessions); sc->dieing = 0; - sc->sid = 1; - sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE | - CRYPTOCAP_F_SYNC); + sc->cid = crypto_get_driverid(dev, sizeof(struct armv8_crypto_session), + CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC); if (sc->cid < 0) { device_printf(dev, "Could not get crypto driver id.\n"); return (ENOMEM); } rw_init(&sc->lock, "armv8crypto"); ctx_mtx = malloc(sizeof(*ctx_mtx) * (mp_maxid + 1), M_ARMV8_CRYPTO, M_WAITOK|M_ZERO); ctx_vfp = malloc(sizeof(*ctx_vfp) * (mp_maxid + 1), M_ARMV8_CRYPTO, M_WAITOK|M_ZERO); CPU_FOREACH(i) { ctx_vfp[i] = fpu_kern_alloc_ctx(0); mtx_init(&ctx_mtx[i], "armv8cryptoctx", NULL, MTX_DEF|MTX_NEW); } crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0); return (0); } static int armv8_crypto_detach(device_t dev) { struct armv8_crypto_softc *sc; - struct armv8_crypto_session *ses; int i; sc = device_get_softc(dev); rw_wlock(&sc->lock); - TAILQ_FOREACH(ses, &sc->sessions, next) { - if (ses->used) { - rw_wunlock(&sc->lock); - device_printf(dev, - "Cannot detach, sessions still active.\n"); - return (EBUSY); - } - } sc->dieing = 1; - while ((ses = TAILQ_FIRST(&sc->sessions)) != NULL) { - TAILQ_REMOVE(&sc->sessions, ses, next); - free(ses, M_ARMV8_CRYPTO); - } rw_wunlock(&sc->lock); crypto_unregister_all(sc->cid); rw_destroy(&sc->lock); CPU_FOREACH(i) { if (ctx_vfp[i] != NULL) { mtx_destroy(&ctx_mtx[i]); fpu_kern_free_ctx(ctx_vfp[i]); } ctx_vfp[i] = NULL; } free(ctx_mtx, M_ARMV8_CRYPTO); ctx_mtx = NULL; free(ctx_vfp, M_ARMV8_CRYPTO); ctx_vfp = NULL; return (0); } static int armv8_crypto_cipher_setup(struct armv8_crypto_session *ses, struct cryptoini *encini) { int i; switch (ses->algo) { case CRYPTO_AES_CBC: switch (encini->cri_klen) { case 128: ses->rounds = AES128_ROUNDS; break; case 192: ses->rounds = AES192_ROUNDS; break; case 256: ses->rounds = AES256_ROUNDS; break; default: 
CRYPTDEB("invalid CBC/ICM/GCM key length"); return (EINVAL); } break; default: return (EINVAL); } rijndaelKeySetupEnc(ses->enc_schedule, encini->cri_key, encini->cri_klen); rijndaelKeySetupDec(ses->dec_schedule, encini->cri_key, encini->cri_klen); for (i = 0; i < nitems(ses->enc_schedule); i++) { ses->enc_schedule[i] = bswap32(ses->enc_schedule[i]); ses->dec_schedule[i] = bswap32(ses->dec_schedule[i]); } return (0); } static int -armv8_crypto_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri) +armv8_crypto_newsession(device_t dev, crypto_session_t cses, + struct cryptoini *cri) { struct armv8_crypto_softc *sc; struct armv8_crypto_session *ses; struct cryptoini *encini; int error; - if (sidp == NULL || cri == NULL) { - CRYPTDEB("no sidp or cri"); + if (cri == NULL) { + CRYPTDEB("no cri"); return (EINVAL); } sc = device_get_softc(dev); if (sc->dieing) return (EINVAL); ses = NULL; encini = NULL; for (; cri != NULL; cri = cri->cri_next) { switch (cri->cri_alg) { case CRYPTO_AES_CBC: if (encini != NULL) { CRYPTDEB("encini already set"); return (EINVAL); } encini = cri; break; default: CRYPTDEB("unhandled algorithm"); return (EINVAL); } } if (encini == NULL) { CRYPTDEB("no cipher"); return (EINVAL); } rw_wlock(&sc->lock); if (sc->dieing) { rw_wunlock(&sc->lock); return (EINVAL); } - /* - * Free sessions goes first, so if first session is used, we need to - * allocate one. - */ - ses = TAILQ_FIRST(&sc->sessions); - if (ses == NULL || ses->used) { - ses = malloc(sizeof(*ses), M_ARMV8_CRYPTO, M_NOWAIT | M_ZERO); - if (ses == NULL) { - rw_wunlock(&sc->lock); - return (ENOMEM); - } - ses->id = sc->sid++; - } else { - TAILQ_REMOVE(&sc->sessions, ses, next); - } - ses->used = 1; - TAILQ_INSERT_TAIL(&sc->sessions, ses, next); - rw_wunlock(&sc->lock); + ses = crypto_get_driver_session(cses); ses->algo = encini->cri_alg; error = armv8_crypto_cipher_setup(ses, encini); if (error != 0) { CRYPTDEB("setup failed"); - rw_wlock(&sc->lock); - armv8_crypto_freesession_locked(sc, ses); - rw_wunlock(&sc->lock); return (error); } - *sidp = ses->id; return (0); } -static void -armv8_crypto_freesession_locked(struct armv8_crypto_softc *sc, - struct armv8_crypto_session *ses) -{ - uint32_t sid; - - rw_assert(&sc->lock, RA_WLOCKED); - - sid = ses->id; - TAILQ_REMOVE(&sc->sessions, ses, next); - *ses = (struct armv8_crypto_session){}; - ses->id = sid; - TAILQ_INSERT_HEAD(&sc->sessions, ses, next); -} - static int -armv8_crypto_freesession(device_t dev, uint64_t tid) -{ - struct armv8_crypto_softc *sc; - struct armv8_crypto_session *ses; - uint32_t sid; - - sc = device_get_softc(dev); - sid = ((uint32_t)tid) & 0xffffffff; - rw_wlock(&sc->lock); - TAILQ_FOREACH_REVERSE(ses, &sc->sessions, armv8_crypto_sessions_head, - next) { - if (ses->id == sid) - break; - } - if (ses == NULL) { - rw_wunlock(&sc->lock); - return (EINVAL); - } - armv8_crypto_freesession_locked(sc, ses); - rw_wunlock(&sc->lock); - - return (0); -} - -static int armv8_crypto_process(device_t dev, struct cryptop *crp, int hint __unused) { - struct armv8_crypto_softc *sc = device_get_softc(dev); struct cryptodesc *crd, *enccrd; struct armv8_crypto_session *ses; int error; error = 0; enccrd = NULL; /* Sanity check. 
*/ if (crp == NULL) return (EINVAL); if (crp->crp_callback == NULL || crp->crp_desc == NULL) { error = EINVAL; goto out; } for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) { switch (crd->crd_alg) { case CRYPTO_AES_CBC: if (enccrd != NULL) { error = EINVAL; goto out; } enccrd = crd; break; default: error = EINVAL; goto out; } } if (enccrd == NULL) { error = EINVAL; goto out; } /* We can only handle full blocks for now */ if ((enccrd->crd_len % AES_BLOCK_LEN) != 0) { error = EINVAL; goto out; } - rw_rlock(&sc->lock); - TAILQ_FOREACH_REVERSE(ses, &sc->sessions, armv8_crypto_sessions_head, - next) { - if (ses->id == (crp->crp_sid & 0xffffffff)) - break; - } - rw_runlock(&sc->lock); - if (ses == NULL) { - error = EINVAL; - goto out; - } - + ses = crypto_get_driver_session(crp->crp_session); error = armv8_crypto_cipher_process(ses, enccrd, crp); out: crp->crp_etype = error; crypto_done(crp); return (error); } static uint8_t * armv8_crypto_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp, int *allocated) { struct mbuf *m; struct uio *uio; struct iovec *iov; uint8_t *addr; if (crp->crp_flags & CRYPTO_F_IMBUF) { m = (struct mbuf *)crp->crp_buf; if (m->m_next != NULL) goto alloc; addr = mtod(m, uint8_t *); } else if (crp->crp_flags & CRYPTO_F_IOV) { uio = (struct uio *)crp->crp_buf; if (uio->uio_iovcnt != 1) goto alloc; iov = uio->uio_iov; addr = (uint8_t *)iov->iov_base; } else addr = (uint8_t *)crp->crp_buf; *allocated = 0; addr += enccrd->crd_skip; return (addr); alloc: addr = malloc(enccrd->crd_len, M_ARMV8_CRYPTO, M_NOWAIT); if (addr != NULL) { *allocated = 1; crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip, enccrd->crd_len, addr); } else *allocated = 0; return (addr); } static int armv8_crypto_cipher_process(struct armv8_crypto_session *ses, struct cryptodesc *enccrd, struct cryptop *crp) { struct fpu_kern_ctx *ctx; uint8_t *buf; uint8_t iv[AES_BLOCK_LEN]; int allocated, i; int encflag, ivlen; int kt; encflag = (enccrd->crd_flags & CRD_F_ENCRYPT) == CRD_F_ENCRYPT; buf = armv8_crypto_cipher_alloc(enccrd, crp, &allocated); if (buf == NULL) return (ENOMEM); kt = is_fpu_kern_thread(0); if (!kt) { AQUIRE_CTX(i, ctx); fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); } if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) { panic("CRD_F_KEY_EXPLICIT"); } switch (enccrd->crd_alg) { case CRYPTO_AES_CBC: ivlen = AES_BLOCK_LEN; break; } /* Setup iv */ if (encflag) { if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0) bcopy(enccrd->crd_iv, iv, ivlen); else arc4rand(iv, ivlen, 0); if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivlen, iv); } else { if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0) bcopy(enccrd->crd_iv, iv, ivlen); else crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivlen, iv); } /* Do work */ switch (ses->algo) { case CRYPTO_AES_CBC: if (encflag) armv8_aes_encrypt_cbc(ses->rounds, ses->enc_schedule, enccrd->crd_len, buf, buf, iv); else armv8_aes_decrypt_cbc(ses->rounds, ses->dec_schedule, enccrd->crd_len, buf, iv); break; } if (allocated) crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip, enccrd->crd_len, buf); if (!kt) { fpu_kern_leave(curthread, ctx); RELEASE_CTX(i, ctx); } if (allocated) { bzero(buf, enccrd->crd_len); free(buf, M_ARMV8_CRYPTO); } return (0); } static device_method_t armv8_crypto_methods[] = { DEVMETHOD(device_identify, armv8_crypto_identify), DEVMETHOD(device_probe, armv8_crypto_probe), DEVMETHOD(device_attach, 
armv8_crypto_attach), DEVMETHOD(device_detach, armv8_crypto_detach), DEVMETHOD(cryptodev_newsession, armv8_crypto_newsession), - DEVMETHOD(cryptodev_freesession, armv8_crypto_freesession), DEVMETHOD(cryptodev_process, armv8_crypto_process), DEVMETHOD_END, }; static DEFINE_CLASS_0(armv8crypto, armv8_crypto_driver, armv8_crypto_methods, sizeof(struct armv8_crypto_softc)); static devclass_t armv8_crypto_devclass; DRIVER_MODULE(armv8crypto, nexus, armv8_crypto_driver, armv8_crypto_devclass, 0, 0); Index: head/sys/crypto/armv8/armv8_crypto.h =================================================================== --- head/sys/crypto/armv8/armv8_crypto.h (revision 336438) +++ head/sys/crypto/armv8/armv8_crypto.h (revision 336439) @@ -1,55 +1,52 @@ /*- * Copyright (c) 2016 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _ARMV8_CRYPTO_H_ #define _ARMV8_CRYPTO_H_ #define AES128_ROUNDS 10 #define AES192_ROUNDS 12 #define AES256_ROUNDS 14 #define AES_SCHED_LEN ((AES256_ROUNDS + 1) * AES_BLOCK_LEN) struct armv8_crypto_session { uint32_t enc_schedule[AES_SCHED_LEN/4]; uint32_t dec_schedule[AES_SCHED_LEN/4]; int algo; int rounds; - int used; - uint32_t id; - TAILQ_ENTRY(armv8_crypto_session) next; }; void armv8_aes_encrypt_cbc(int, const void *, size_t, const uint8_t *, uint8_t *, const uint8_t[static AES_BLOCK_LEN]); void armv8_aes_decrypt_cbc(int, const void *, size_t, uint8_t *, const uint8_t[static AES_BLOCK_LEN]); #endif /* _ARMV8_CRYPTO_H_ */ Index: head/sys/crypto/blake2/blake2_cryptodev.c =================================================================== --- head/sys/crypto/blake2/blake2_cryptodev.c (revision 336438) +++ head/sys/crypto/blake2/blake2_cryptodev.c (revision 336439) @@ -1,538 +1,446 @@ /*- * Copyright (c) 2018 Conrad Meyer * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__amd64__) #include #elif defined(__i386__) #include #endif struct blake2_session { int algo; size_t klen; size_t mlen; uint8_t key[BLAKE2B_KEYBYTES]; - bool used; - uint32_t id; - TAILQ_ENTRY(blake2_session) next; }; CTASSERT((size_t)BLAKE2B_KEYBYTES > (size_t)BLAKE2S_KEYBYTES); struct blake2_softc { bool dying; int32_t cid; - uint32_t sid; - TAILQ_HEAD(blake2_sessions_head, blake2_session) sessions; struct rwlock lock; }; static struct mtx_padalign *ctx_mtx; static struct fpu_kern_ctx **ctx_fpu; #define ACQUIRE_CTX(i, ctx) \ do { \ (i) = PCPU_GET(cpuid); \ mtx_lock(&ctx_mtx[(i)]); \ (ctx) = ctx_fpu[(i)]; \ } while (0) #define RELEASE_CTX(i, ctx) \ do { \ mtx_unlock(&ctx_mtx[(i)]); \ (i) = -1; \ (ctx) = NULL; \ } while (0) -static int blake2_newsession(device_t, uint32_t *sidp, struct cryptoini *cri); -static int blake2_freesession(device_t, uint64_t tid); -static void blake2_freesession_locked(struct blake2_softc *sc, - struct blake2_session *ses); +static int blake2_newsession(device_t, crypto_session_t cses, + struct cryptoini *cri); static int blake2_cipher_setup(struct blake2_session *ses, struct cryptoini *authini); static int blake2_cipher_process(struct blake2_session *ses, struct cryptop *crp); MALLOC_DEFINE(M_BLAKE2, "blake2_data", "Blake2 Data"); static void blake2_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "blaketwo", -1) == NULL && BUS_ADD_CHILD(parent, 10, "blaketwo", -1) == 0) panic("blaketwo: could not attach"); } static int blake2_probe(device_t dev) { device_set_desc(dev, "Blake2"); return (0); } static void blake2_cleanctx(void) { int i; /* XXX - no way to return driverid */ CPU_FOREACH(i) { if (ctx_fpu[i] != NULL) { mtx_destroy(&ctx_mtx[i]); fpu_kern_free_ctx(ctx_fpu[i]); } ctx_fpu[i] = NULL; } free(ctx_mtx, M_BLAKE2); ctx_mtx = NULL; free(ctx_fpu, M_BLAKE2); ctx_fpu = NULL; } static int blake2_attach(device_t dev) { struct blake2_softc *sc; int i; sc = device_get_softc(dev); sc->dying = false; - TAILQ_INIT(&sc->sessions); - sc->sid = 1; - sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE | - CRYPTOCAP_F_SYNC); + sc->cid = crypto_get_driverid(dev, sizeof(struct blake2_session), + CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC); if (sc->cid < 0) { device_printf(dev, "Could not get crypto driver id.\n"); return (ENOMEM); } ctx_mtx = malloc(sizeof(*ctx_mtx) * (mp_maxid + 1), 
M_BLAKE2, M_WAITOK | M_ZERO); ctx_fpu = malloc(sizeof(*ctx_fpu) * (mp_maxid + 1), M_BLAKE2, M_WAITOK | M_ZERO); CPU_FOREACH(i) { ctx_fpu[i] = fpu_kern_alloc_ctx(0); mtx_init(&ctx_mtx[i], "bl2fpumtx", NULL, MTX_DEF | MTX_NEW); } rw_init(&sc->lock, "blake2_lock"); crypto_register(sc->cid, CRYPTO_BLAKE2B, 0, 0); crypto_register(sc->cid, CRYPTO_BLAKE2S, 0, 0); return (0); } static int blake2_detach(device_t dev) { struct blake2_softc *sc; - struct blake2_session *ses; sc = device_get_softc(dev); rw_wlock(&sc->lock); - TAILQ_FOREACH(ses, &sc->sessions, next) { - if (ses->used) { - rw_wunlock(&sc->lock); - device_printf(dev, - "Cannot detach, sessions still active.\n"); - return (EBUSY); - } - } sc->dying = true; - while ((ses = TAILQ_FIRST(&sc->sessions)) != NULL) { - TAILQ_REMOVE(&sc->sessions, ses, next); - free(ses, M_BLAKE2); - } rw_wunlock(&sc->lock); crypto_unregister_all(sc->cid); rw_destroy(&sc->lock); blake2_cleanctx(); return (0); } static int -blake2_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri) +blake2_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri) { struct blake2_softc *sc; struct blake2_session *ses; struct cryptoini *authini; int error; - if (sidp == NULL || cri == NULL) { - CRYPTDEB("no sidp or cri"); + if (cri == NULL) { + CRYPTDEB("no cri"); return (EINVAL); } sc = device_get_softc(dev); - ses = NULL; authini = NULL; for (; cri != NULL; cri = cri->cri_next) { switch (cri->cri_alg) { case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: if (authini != NULL) { CRYPTDEB("authini already set"); return (EINVAL); } authini = cri; break; default: CRYPTDEB("unhandled algorithm"); return (EINVAL); } } if (authini == NULL) { CRYPTDEB("no cipher"); return (EINVAL); } rw_wlock(&sc->lock); if (sc->dying) { rw_wunlock(&sc->lock); return (EINVAL); } - /* - * Free sessions are inserted at the head of the list. So if the first - * session is used, none are free and we must allocate a new one. 
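[Editorial note] The free-session recycling described in the removed comment here disappears in every converted driver: the framework now owns the per-session memory, so the sid counter, the M_NOWAIT allocation failure path, and the locked list search in the process path all go away. armv8 and blake2 also stop implementing cryptodev_freesession entirely, since they hold no per-session resources of their own. Reassembled from the blake2 hunks for readability, the lookup in blake2_process() shrinks from a locked reverse walk of the session list to a single call:

    /* Before this change: locate the session under the softc lock. */
    rw_rlock(&sc->lock);
    TAILQ_FOREACH_REVERSE(ses, &sc->sessions, blake2_sessions_head, next) {
            if (ses->id == (crp->crp_sid & 0xffffffff))
                    break;
    }
    rw_runlock(&sc->lock);

    /* After: the handle embedded in the request resolves directly. */
    ses = crypto_get_driver_session(crp->crp_session);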
- */ - ses = TAILQ_FIRST(&sc->sessions); - if (ses == NULL || ses->used) { - ses = malloc(sizeof(*ses), M_BLAKE2, M_NOWAIT | M_ZERO); - if (ses == NULL) { - rw_wunlock(&sc->lock); - return (ENOMEM); - } - ses->id = sc->sid++; - } else { - TAILQ_REMOVE(&sc->sessions, ses, next); - } - ses->used = true; - TAILQ_INSERT_TAIL(&sc->sessions, ses, next); rw_wunlock(&sc->lock); + ses = crypto_get_driver_session(cses); + ses->algo = authini->cri_alg; error = blake2_cipher_setup(ses, authini); if (error != 0) { CRYPTDEB("setup failed"); - rw_wlock(&sc->lock); - blake2_freesession_locked(sc, ses); - rw_wunlock(&sc->lock); return (error); } - *sidp = ses->id; return (0); } -static void -blake2_freesession_locked(struct blake2_softc *sc, struct blake2_session *ses) -{ - uint32_t sid; - - rw_assert(&sc->lock, RA_WLOCKED); - - sid = ses->id; - TAILQ_REMOVE(&sc->sessions, ses, next); - explicit_bzero(ses, sizeof(*ses)); - ses->id = sid; - TAILQ_INSERT_HEAD(&sc->sessions, ses, next); -} - static int -blake2_freesession(device_t dev, uint64_t tid) -{ - struct blake2_softc *sc; - struct blake2_session *ses; - uint32_t sid; - - sc = device_get_softc(dev); - sid = ((uint32_t)tid) & 0xffffffff; - rw_wlock(&sc->lock); - TAILQ_FOREACH_REVERSE(ses, &sc->sessions, blake2_sessions_head, next) { - if (ses->id == sid) - break; - } - if (ses == NULL) { - rw_wunlock(&sc->lock); - return (EINVAL); - } - blake2_freesession_locked(sc, ses); - rw_wunlock(&sc->lock); - return (0); -} - -static int blake2_process(device_t dev, struct cryptop *crp, int hint __unused) { - struct blake2_softc *sc; struct blake2_session *ses; struct cryptodesc *crd, *authcrd; int error; - sc = device_get_softc(dev); ses = NULL; error = 0; authcrd = NULL; /* Sanity check. */ if (crp == NULL) return (EINVAL); if (crp->crp_callback == NULL || crp->crp_desc == NULL) { error = EINVAL; goto out; } for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) { switch (crd->crd_alg) { case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: if (authcrd != NULL) { error = EINVAL; goto out; } authcrd = crd; break; default: error = EINVAL; goto out; } } - rw_rlock(&sc->lock); - TAILQ_FOREACH_REVERSE(ses, &sc->sessions, blake2_sessions_head, next) { - if (ses->id == (crp->crp_sid & 0xffffffff)) - break; - } - rw_runlock(&sc->lock); - if (ses == NULL) { - error = EINVAL; - goto out; - } - + ses = crypto_get_driver_session(crp->crp_session); error = blake2_cipher_process(ses, crp); if (error != 0) goto out; out: crp->crp_etype = error; crypto_done(crp); return (error); } static device_method_t blake2_methods[] = { DEVMETHOD(device_identify, blake2_identify), DEVMETHOD(device_probe, blake2_probe), DEVMETHOD(device_attach, blake2_attach), DEVMETHOD(device_detach, blake2_detach), DEVMETHOD(cryptodev_newsession, blake2_newsession), - DEVMETHOD(cryptodev_freesession, blake2_freesession), DEVMETHOD(cryptodev_process, blake2_process), DEVMETHOD_END }; static driver_t blake2_driver = { "blaketwo", blake2_methods, sizeof(struct blake2_softc), }; static devclass_t blake2_devclass; DRIVER_MODULE(blake2, nexus, blake2_driver, blake2_devclass, 0, 0); MODULE_VERSION(blake2, 1); MODULE_DEPEND(blake2, crypto, 1, 1, 1); static int blake2_cipher_setup(struct blake2_session *ses, struct cryptoini *authini) { int keylen; CTASSERT((size_t)BLAKE2S_OUTBYTES <= (size_t)BLAKE2B_OUTBYTES); if (authini->cri_mlen < 0) return (EINVAL); switch (ses->algo) { case CRYPTO_BLAKE2S: if (authini->cri_mlen != 0 && authini->cri_mlen > BLAKE2S_OUTBYTES) return (EINVAL); /* FALLTHROUGH */ case CRYPTO_BLAKE2B: if 
(authini->cri_mlen != 0 && authini->cri_mlen > BLAKE2B_OUTBYTES) return (EINVAL); if (authini->cri_klen % 8 != 0) return (EINVAL); keylen = authini->cri_klen / 8; if (keylen > sizeof(ses->key) || (ses->algo == CRYPTO_BLAKE2S && keylen > BLAKE2S_KEYBYTES)) return (EINVAL); ses->klen = keylen; memcpy(ses->key, authini->cri_key, keylen); ses->mlen = authini->cri_mlen; } return (0); } static int blake2b_applicator(void *state, void *buf, u_int len) { int rc; rc = blake2b_update(state, buf, len); if (rc != 0) return (EINVAL); return (0); } static int blake2s_applicator(void *state, void *buf, u_int len) { int rc; rc = blake2s_update(state, buf, len); if (rc != 0) return (EINVAL); return (0); } static int blake2_cipher_process(struct blake2_session *ses, struct cryptop *crp) { union { blake2b_state sb; blake2s_state ss; } bctx; char res[BLAKE2B_OUTBYTES]; struct fpu_kern_ctx *ctx; int ctxidx; bool kt; struct cryptodesc *crd; int error, rc; size_t hashlen; crd = crp->crp_desc; ctx = NULL; ctxidx = 0; error = EINVAL; kt = is_fpu_kern_thread(0); if (!kt) { ACQUIRE_CTX(ctxidx, ctx); fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); } if (crd->crd_flags != 0) goto out; switch (ses->algo) { case CRYPTO_BLAKE2B: if (ses->mlen != 0) hashlen = ses->mlen; else hashlen = BLAKE2B_OUTBYTES; if (ses->klen > 0) rc = blake2b_init_key(&bctx.sb, hashlen, ses->key, ses->klen); else rc = blake2b_init(&bctx.sb, hashlen); if (rc != 0) goto out; error = crypto_apply(crp->crp_flags, crp->crp_buf, crd->crd_skip, crd->crd_len, blake2b_applicator, &bctx.sb); if (error != 0) goto out; rc = blake2b_final(&bctx.sb, res, hashlen); if (rc != 0) { error = EINVAL; goto out; } break; case CRYPTO_BLAKE2S: if (ses->mlen != 0) hashlen = ses->mlen; else hashlen = BLAKE2S_OUTBYTES; if (ses->klen > 0) rc = blake2s_init_key(&bctx.ss, hashlen, ses->key, ses->klen); else rc = blake2s_init(&bctx.ss, hashlen); if (rc != 0) goto out; error = crypto_apply(crp->crp_flags, crp->crp_buf, crd->crd_skip, crd->crd_len, blake2s_applicator, &bctx.ss); if (error != 0) goto out; rc = blake2s_final(&bctx.ss, res, hashlen); if (rc != 0) { error = EINVAL; goto out; } break; default: panic("unreachable"); } crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, hashlen, (void *)res); out: if (!kt) { fpu_kern_leave(curthread, ctx); RELEASE_CTX(ctxidx, ctx); } return (error); } Index: head/sys/crypto/ccp/ccp.c =================================================================== --- head/sys/crypto/ccp/ccp.c (revision 336438) +++ head/sys/crypto/ccp/ccp.c (revision 336439) @@ -1,931 +1,883 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2017 Chelsio Communications, Inc. * Copyright (c) 2017 Conrad Meyer * All rights reserved. * Largely borrowed from ccr(4), Written by: John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #include #include #include #include #include "cryptodev_if.h" #include "ccp.h" #include "ccp_hardware.h" MALLOC_DEFINE(M_CCP, "ccp", "AMD CCP crypto"); /* * Need a global softc available for garbage random_source API, which lacks any * context pointer. It's also handy for debugging. */ struct ccp_softc *g_ccp_softc; bool g_debug_print = false; SYSCTL_BOOL(_hw_ccp, OID_AUTO, debug, CTLFLAG_RWTUN, &g_debug_print, 0, "Set to enable debugging log messages"); static struct pciid { uint32_t devid; const char *desc; } ccp_ids[] = { { 0x14561022, "AMD CCP-5a" }, { 0x14681022, "AMD CCP-5b" }, }; static struct random_source random_ccp = { .rs_ident = "AMD CCP TRNG", .rs_source = RANDOM_PURE_CCP, .rs_read = random_ccp_read, }; /* * ccp_populate_sglist() generates a scatter/gather list that covers the entire * crypto operation buffer. */ static int ccp_populate_sglist(struct sglist *sg, struct cryptop *crp) { int error; sglist_reset(sg); if (crp->crp_flags & CRYPTO_F_IMBUF) error = sglist_append_mbuf(sg, crp->crp_mbuf); else if (crp->crp_flags & CRYPTO_F_IOV) error = sglist_append_uio(sg, crp->crp_uio); else error = sglist_append(sg, crp->crp_buf, crp->crp_ilen); return (error); } /* * Handle a GCM request with an empty payload by performing the * operation in software. Derived from swcr_authenc(). */ static void ccp_gcm_soft(struct ccp_session *s, struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde) { struct aes_gmac_ctx gmac_ctx; char block[GMAC_BLOCK_LEN]; char digest[GMAC_DIGEST_LEN]; char iv[AES_BLOCK_LEN]; int i, len; /* * This assumes a 12-byte IV from the crp. See longer comment * above in ccp_gcm() for more details. */ if (crde->crd_flags & CRD_F_ENCRYPT) { if (crde->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crde->crd_iv, 12); else arc4rand(iv, 12, 0); if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0) crypto_copyback(crp->crp_flags, crp->crp_buf, crde->crd_inject, 12, iv); } else { if (crde->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crde->crd_iv, 12); else crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_inject, 12, iv); } *(uint32_t *)&iv[12] = htobe32(1); /* Initialize the MAC. */ AES_GMAC_Init(&gmac_ctx); AES_GMAC_Setkey(&gmac_ctx, s->blkcipher.enckey, s->blkcipher.key_len); AES_GMAC_Reinit(&gmac_ctx, iv, sizeof(iv)); /* MAC the AAD. */ for (i = 0; i < crda->crd_len; i += sizeof(block)) { len = imin(crda->crd_len - i, sizeof(block)); crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip + i, len, block); bzero(block + len, sizeof(block) - len); AES_GMAC_Update(&gmac_ctx, block, sizeof(block)); } /* Length block. 
*/ bzero(block, sizeof(block)); ((uint32_t *)block)[1] = htobe32(crda->crd_len * 8); AES_GMAC_Update(&gmac_ctx, block, sizeof(block)); AES_GMAC_Final(digest, &gmac_ctx); if (crde->crd_flags & CRD_F_ENCRYPT) { crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject, sizeof(digest), digest); crp->crp_etype = 0; } else { char digest2[GMAC_DIGEST_LEN]; crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject, sizeof(digest2), digest2); if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) crp->crp_etype = 0; else crp->crp_etype = EBADMSG; } crypto_done(crp); } static int ccp_probe(device_t dev) { struct pciid *ip; uint32_t id; id = pci_get_devid(dev); for (ip = ccp_ids; ip < &ccp_ids[nitems(ccp_ids)]; ip++) { if (id == ip->devid) { device_set_desc(dev, ip->desc); return (0); } } return (ENXIO); } static void ccp_initialize_queues(struct ccp_softc *sc) { struct ccp_queue *qp; size_t i; for (i = 0; i < nitems(sc->queues); i++) { qp = &sc->queues[i]; qp->cq_softc = sc; qp->cq_qindex = i; mtx_init(&qp->cq_lock, "ccp queue", NULL, MTX_DEF); /* XXX - arbitrarily chosen sizes */ qp->cq_sg_crp = sglist_alloc(32, M_WAITOK); /* Two more SGEs than sg_crp to accommodate ipad. */ qp->cq_sg_ulptx = sglist_alloc(34, M_WAITOK); qp->cq_sg_dst = sglist_alloc(2, M_WAITOK); } } static void ccp_free_queues(struct ccp_softc *sc) { struct ccp_queue *qp; size_t i; for (i = 0; i < nitems(sc->queues); i++) { qp = &sc->queues[i]; mtx_destroy(&qp->cq_lock); sglist_free(qp->cq_sg_crp); sglist_free(qp->cq_sg_ulptx); sglist_free(qp->cq_sg_dst); } } static int ccp_attach(device_t dev) { struct ccp_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; - sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); + sc->cid = crypto_get_driverid(dev, sizeof(struct ccp_session), + CRYPTOCAP_F_HARDWARE); if (sc->cid < 0) { device_printf(dev, "could not get crypto driver id\n"); return (ENXIO); } error = ccp_hw_attach(dev); if (error != 0) return (error); mtx_init(&sc->lock, "ccp", NULL, MTX_DEF); ccp_initialize_queues(sc); if (g_ccp_softc == NULL) { g_ccp_softc = sc; if ((sc->hw_features & VERSION_CAP_TRNG) != 0) random_source_register(&random_ccp); } if ((sc->hw_features & VERSION_CAP_AES) != 0) { crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0); crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0); crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0); crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0); crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0); crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0); crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0); } if ((sc->hw_features & VERSION_CAP_SHA) != 0) { crypto_register(sc->cid, CRYPTO_SHA1_HMAC, 0, 0); crypto_register(sc->cid, CRYPTO_SHA2_256_HMAC, 0, 0); crypto_register(sc->cid, CRYPTO_SHA2_384_HMAC, 0, 0); crypto_register(sc->cid, CRYPTO_SHA2_512_HMAC, 0, 0); } return (0); } static int ccp_detach(device_t dev) { struct ccp_softc *sc; - int i; sc = device_get_softc(dev); mtx_lock(&sc->lock); - for (i = 0; i < sc->nsessions; i++) { - if (sc->sessions[i].active || sc->sessions[i].pending != 0) { - mtx_unlock(&sc->lock); - return (EBUSY); - } - } sc->detaching = true; mtx_unlock(&sc->lock); crypto_unregister_all(sc->cid); if (g_ccp_softc == sc && (sc->hw_features & VERSION_CAP_TRNG) != 0) random_source_deregister(&random_ccp); ccp_hw_detach(dev); ccp_free_queues(sc); if (g_ccp_softc == sc) g_ccp_softc = NULL; - free(sc->sessions, M_CCP); mtx_destroy(&sc->lock); return (0); } static void ccp_init_hmac_digest(struct ccp_session *s, int cri_alg, 
char *key, int klen) { union authctx auth_ctx; struct auth_hash *axf; u_int i; /* * If the key is larger than the block size, use the digest of * the key as the key instead. */ axf = s->hmac.auth_hash; klen /= 8; if (klen > axf->blocksize) { axf->Init(&auth_ctx); axf->Update(&auth_ctx, key, klen); axf->Final(s->hmac.ipad, &auth_ctx); explicit_bzero(&auth_ctx, sizeof(auth_ctx)); klen = axf->hashsize; } else memcpy(s->hmac.ipad, key, klen); memset(s->hmac.ipad + klen, 0, axf->blocksize - klen); memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize); for (i = 0; i < axf->blocksize; i++) { s->hmac.ipad[i] ^= HMAC_IPAD_VAL; s->hmac.opad[i] ^= HMAC_OPAD_VAL; } } static int ccp_aes_check_keylen(int alg, int klen) { switch (klen) { case 128: case 192: if (alg == CRYPTO_AES_XTS) return (EINVAL); break; case 256: break; case 512: if (alg != CRYPTO_AES_XTS) return (EINVAL); break; default: return (EINVAL); } return (0); } static void ccp_aes_setkey(struct ccp_session *s, int alg, const void *key, int klen) { unsigned kbits; if (alg == CRYPTO_AES_XTS) kbits = klen / 2; else kbits = klen; switch (kbits) { case 128: s->blkcipher.cipher_type = CCP_AES_TYPE_128; break; case 192: s->blkcipher.cipher_type = CCP_AES_TYPE_192; break; case 256: s->blkcipher.cipher_type = CCP_AES_TYPE_256; break; default: panic("should not get here"); } s->blkcipher.key_len = klen / 8; memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len); } static int -ccp_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri) +ccp_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri) { struct ccp_softc *sc; struct ccp_session *s; struct auth_hash *auth_hash; struct cryptoini *c, *hash, *cipher; enum ccp_aes_mode cipher_mode; unsigned auth_mode, iv_len; unsigned partial_digest_len; unsigned q; - int error, i, sess; + int error; bool gcm_hash; - if (sidp == NULL || cri == NULL) + if (cri == NULL) return (EINVAL); + s = crypto_get_driver_session(cses); + gcm_hash = false; cipher = NULL; hash = NULL; auth_hash = NULL; /* XXX reconcile auth_mode with use by ccp_sha */ auth_mode = 0; cipher_mode = CCP_AES_MODE_ECB; iv_len = 0; partial_digest_len = 0; for (c = cri; c != NULL; c = c->cri_next) { switch (c->cri_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: if (hash) return (EINVAL); hash = c; switch (c->cri_alg) { case CRYPTO_SHA1_HMAC: auth_hash = &auth_hash_hmac_sha1; auth_mode = SHA1; partial_digest_len = SHA1_HASH_LEN; break; case CRYPTO_SHA2_256_HMAC: auth_hash = &auth_hash_hmac_sha2_256; auth_mode = SHA2_256; partial_digest_len = SHA2_256_HASH_LEN; break; case CRYPTO_SHA2_384_HMAC: auth_hash = &auth_hash_hmac_sha2_384; auth_mode = SHA2_384; partial_digest_len = SHA2_512_HASH_LEN; break; case CRYPTO_SHA2_512_HMAC: auth_hash = &auth_hash_hmac_sha2_512; auth_mode = SHA2_512; partial_digest_len = SHA2_512_HASH_LEN; break; case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: gcm_hash = true; #if 0 auth_mode = CHCR_SCMD_AUTH_MODE_GHASH; #endif break; } break; case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_XTS: if (cipher) return (EINVAL); cipher = c; switch (c->cri_alg) { case CRYPTO_AES_CBC: cipher_mode = CCP_AES_MODE_CBC; iv_len = AES_BLOCK_LEN; break; case CRYPTO_AES_ICM: cipher_mode = CCP_AES_MODE_CTR; iv_len = AES_BLOCK_LEN; break; case CRYPTO_AES_NIST_GCM_16: cipher_mode = 
CCP_AES_MODE_GCTR; iv_len = AES_GCM_IV_LEN; break; case CRYPTO_AES_XTS: cipher_mode = CCP_AES_MODE_XTS; iv_len = AES_BLOCK_LEN; break; } if (c->cri_key != NULL) { error = ccp_aes_check_keylen(c->cri_alg, c->cri_klen); if (error != 0) return (error); } break; default: return (EINVAL); } } if (gcm_hash != (cipher_mode == CCP_AES_MODE_GCTR)) return (EINVAL); if (hash == NULL && cipher == NULL) return (EINVAL); if (hash != NULL && hash->cri_key == NULL) return (EINVAL); sc = device_get_softc(dev); mtx_lock(&sc->lock); if (sc->detaching) { mtx_unlock(&sc->lock); return (ENXIO); } - sess = -1; - for (i = 0; i < sc->nsessions; i++) { - if (!sc->sessions[i].active && sc->sessions[i].pending == 0) { - sess = i; - break; - } - } - if (sess == -1) { - s = malloc(sizeof(*s) * (sc->nsessions + 1), M_CCP, - M_NOWAIT | M_ZERO); - if (s == NULL) { - mtx_unlock(&sc->lock); - return (ENOMEM); - } - if (sc->sessions != NULL) - memcpy(s, sc->sessions, sizeof(*s) * sc->nsessions); - sess = sc->nsessions; - free(sc->sessions, M_CCP); - sc->sessions = s; - sc->nsessions++; - } - s = &sc->sessions[sess]; - /* Just grab the first usable queue for now. */ for (q = 0; q < nitems(sc->queues); q++) if ((sc->valid_queues & (1 << q)) != 0) break; if (q == nitems(sc->queues)) { mtx_unlock(&sc->lock); return (ENXIO); } s->queue = q; if (gcm_hash) s->mode = GCM; else if (hash != NULL && cipher != NULL) s->mode = AUTHENC; else if (hash != NULL) s->mode = HMAC; else { MPASS(cipher != NULL); s->mode = BLKCIPHER; } if (gcm_hash) { if (hash->cri_mlen == 0) s->gmac.hash_len = AES_GMAC_HASH_LEN; else s->gmac.hash_len = hash->cri_mlen; } else if (hash != NULL) { s->hmac.auth_hash = auth_hash; s->hmac.auth_mode = auth_mode; s->hmac.partial_digest_len = partial_digest_len; if (hash->cri_mlen == 0) s->hmac.hash_len = auth_hash->hashsize; else s->hmac.hash_len = hash->cri_mlen; ccp_init_hmac_digest(s, hash->cri_alg, hash->cri_key, hash->cri_klen); } if (cipher != NULL) { s->blkcipher.cipher_mode = cipher_mode; s->blkcipher.iv_len = iv_len; if (cipher->cri_key != NULL) ccp_aes_setkey(s, cipher->cri_alg, cipher->cri_key, cipher->cri_klen); } s->active = true; mtx_unlock(&sc->lock); - *sidp = sess; return (0); } -static int -ccp_freesession(device_t dev, uint64_t tid) +static void +ccp_freesession(device_t dev, crypto_session_t cses) { - struct ccp_softc *sc; - uint32_t sid; - int error; + struct ccp_session *s; - sc = device_get_softc(dev); - sid = CRYPTO_SESID2LID(tid); - mtx_lock(&sc->lock); - if (sid >= sc->nsessions || !sc->sessions[sid].active) - error = EINVAL; - else { - if (sc->sessions[sid].pending != 0) - device_printf(dev, - "session %d freed with %d pending requests\n", sid, - sc->sessions[sid].pending); - sc->sessions[sid].active = false; - error = 0; - } - mtx_unlock(&sc->lock); - return (error); + s = crypto_get_driver_session(cses); + + if (s->pending != 0) + device_printf(dev, + "session %p freed with %d pending requests\n", s, + s->pending); + s->active = false; } static int ccp_process(device_t dev, struct cryptop *crp, int hint) { struct ccp_softc *sc; struct ccp_queue *qp; struct ccp_session *s; struct cryptodesc *crd, *crda, *crde; - uint32_t sid; int error; bool qpheld; qpheld = false; qp = NULL; if (crp == NULL) return (EINVAL); crd = crp->crp_desc; - sid = CRYPTO_SESID2LID(crp->crp_sid); + s = crypto_get_driver_session(crp->crp_session); sc = device_get_softc(dev); mtx_lock(&sc->lock); - if (sid >= sc->nsessions || !sc->sessions[sid].active) { - mtx_unlock(&sc->lock); - error = EINVAL; - goto out; - } - - s = 
&sc->sessions[sid]; qp = &sc->queues[s->queue]; mtx_unlock(&sc->lock); error = ccp_queue_acquire_reserve(qp, 1 /* placeholder */, M_NOWAIT); if (error != 0) goto out; qpheld = true; error = ccp_populate_sglist(qp->cq_sg_crp, crp); if (error != 0) goto out; switch (s->mode) { case HMAC: if (crd->crd_flags & CRD_F_KEY_EXPLICIT) ccp_init_hmac_digest(s, crd->crd_alg, crd->crd_key, crd->crd_klen); error = ccp_hmac(qp, s, crp); break; case BLKCIPHER: if (crd->crd_flags & CRD_F_KEY_EXPLICIT) { error = ccp_aes_check_keylen(crd->crd_alg, crd->crd_klen); if (error != 0) break; ccp_aes_setkey(s, crd->crd_alg, crd->crd_key, crd->crd_klen); } error = ccp_blkcipher(qp, s, crp); break; case AUTHENC: error = 0; switch (crd->crd_alg) { case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: case CRYPTO_AES_XTS: /* Only encrypt-then-authenticate supported. */ crde = crd; crda = crd->crd_next; if (!(crde->crd_flags & CRD_F_ENCRYPT)) { error = EINVAL; break; } s->cipher_first = true; break; default: crda = crd; crde = crd->crd_next; if (crde->crd_flags & CRD_F_ENCRYPT) { error = EINVAL; break; } s->cipher_first = false; break; } if (error != 0) break; if (crda->crd_flags & CRD_F_KEY_EXPLICIT) ccp_init_hmac_digest(s, crda->crd_alg, crda->crd_key, crda->crd_klen); if (crde->crd_flags & CRD_F_KEY_EXPLICIT) { error = ccp_aes_check_keylen(crde->crd_alg, crde->crd_klen); if (error != 0) break; ccp_aes_setkey(s, crde->crd_alg, crde->crd_key, crde->crd_klen); } error = ccp_authenc(qp, s, crp, crda, crde); break; case GCM: error = 0; if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) { crde = crd; crda = crd->crd_next; s->cipher_first = true; } else { crda = crd; crde = crd->crd_next; s->cipher_first = false; } if (crde->crd_flags & CRD_F_KEY_EXPLICIT) { error = ccp_aes_check_keylen(crde->crd_alg, crde->crd_klen); if (error != 0) break; ccp_aes_setkey(s, crde->crd_alg, crde->crd_key, crde->crd_klen); } if (crde->crd_len == 0) { mtx_unlock(&qp->cq_lock); ccp_gcm_soft(s, crp, crda, crde); return (0); } error = ccp_gcm(qp, s, crp, crda, crde); break; } if (error == 0) s->pending++; out: if (qpheld) { if (error != 0) { /* * Squash EAGAIN so callers don't uselessly and * expensively retry if the ring was full. 
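[Editorial note] With the framework owning session storage, cryptodev_freesession changes from an int-returning method keyed by a 64-bit sid to a void method taking the opaque handle, so a driver can no longer fail a teardown. Reassembled from the ccp hunk above, the new implementation only warns about requests still in flight and clears its active flag, leaving the memory itself to the framework:

    static void
    ccp_freesession(device_t dev, crypto_session_t cses)
    {
            struct ccp_session *s;

            s = crypto_get_driver_session(cses);
            if (s->pending != 0)
                    device_printf(dev,
                        "session %p freed with %d pending requests\n", s,
                        s->pending);
            s->active = false;
    }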
*/ if (error == EAGAIN) error = ENOMEM; ccp_queue_abort(qp); } else ccp_queue_release(qp); } if (error != 0) { DPRINTF(dev, "%s: early error:%d\n", __func__, error); crp->crp_etype = error; crypto_done(crp); } return (0); } static device_method_t ccp_methods[] = { DEVMETHOD(device_probe, ccp_probe), DEVMETHOD(device_attach, ccp_attach), DEVMETHOD(device_detach, ccp_detach), DEVMETHOD(cryptodev_newsession, ccp_newsession), DEVMETHOD(cryptodev_freesession, ccp_freesession), DEVMETHOD(cryptodev_process, ccp_process), DEVMETHOD_END }; static driver_t ccp_driver = { "ccp", ccp_methods, sizeof(struct ccp_softc) }; static devclass_t ccp_devclass; DRIVER_MODULE(ccp, pci, ccp_driver, ccp_devclass, NULL, NULL); MODULE_VERSION(ccp, 1); MODULE_DEPEND(ccp, crypto, 1, 1, 1); MODULE_DEPEND(ccp, random_device, 1, 1, 1); MODULE_PNP_INFO("W32:vendor/device", pci, ccp, ccp_ids, sizeof(ccp_ids[0]), nitems(ccp_ids)); static int ccp_queue_reserve_space(struct ccp_queue *qp, unsigned n, int mflags) { struct ccp_softc *sc; mtx_assert(&qp->cq_lock, MA_OWNED); sc = qp->cq_softc; if (n < 1 || n >= (1 << sc->ring_size_order)) return (EINVAL); while (true) { if (ccp_queue_get_ring_space(qp) >= n) return (0); if ((mflags & M_WAITOK) == 0) return (EAGAIN); qp->cq_waiting = true; msleep(&qp->cq_tail, &qp->cq_lock, 0, "ccpqfull", 0); } } int ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags) { int error; mtx_lock(&qp->cq_lock); qp->cq_acq_tail = qp->cq_tail; error = ccp_queue_reserve_space(qp, n, mflags); if (error != 0) mtx_unlock(&qp->cq_lock); return (error); } void ccp_queue_release(struct ccp_queue *qp) { mtx_assert(&qp->cq_lock, MA_OWNED); if (qp->cq_tail != qp->cq_acq_tail) { wmb(); ccp_queue_write_tail(qp); } mtx_unlock(&qp->cq_lock); } void ccp_queue_abort(struct ccp_queue *qp) { unsigned i; mtx_assert(&qp->cq_lock, MA_OWNED); /* Wipe out any descriptors associated with this aborted txn. 
*/ for (i = qp->cq_acq_tail; i != qp->cq_tail; i = (i + 1) % (1 << qp->cq_softc->ring_size_order)) { memset(&qp->desc_ring[i], 0, sizeof(qp->desc_ring[i])); } qp->cq_tail = qp->cq_acq_tail; mtx_unlock(&qp->cq_lock); } #ifdef DDB #define _db_show_lock(lo) LOCK_CLASS(lo)->lc_ddb_show(lo) #define db_show_lock(lk) _db_show_lock(&(lk)->lock_object) static void db_show_ccp_sc(struct ccp_softc *sc) { db_printf("ccp softc at %p\n", sc); db_printf(" cid: %d\n", (int)sc->cid); - db_printf(" nsessions: %d\n", sc->nsessions); db_printf(" lock: "); db_show_lock(&sc->lock); db_printf(" detaching: %d\n", (int)sc->detaching); db_printf(" ring_size_order: %u\n", sc->ring_size_order); db_printf(" hw_version: %d\n", (int)sc->hw_version); db_printf(" hw_features: %b\n", (int)sc->hw_features, "\20\24ELFC\23TRNG\22Zip_Compress\16Zip_Decompress\13ECC\12RSA" "\11SHA\0103DES\07AES"); db_printf(" hw status:\n"); db_ccp_show_hw(sc); } static void db_show_ccp_qp(struct ccp_queue *qp) { db_printf(" lock: "); db_show_lock(&qp->cq_lock); db_printf(" cq_qindex: %u\n", qp->cq_qindex); db_printf(" cq_softc: %p\n", qp->cq_softc); db_printf(" head: %u\n", qp->cq_head); db_printf(" tail: %u\n", qp->cq_tail); db_printf(" acq_tail: %u\n", qp->cq_acq_tail); db_printf(" desc_ring: %p\n", qp->desc_ring); db_printf(" completions_ring: %p\n", qp->completions_ring); db_printf(" descriptors (phys): 0x%jx\n", (uintmax_t)qp->desc_ring_bus_addr); db_printf(" hw status:\n"); db_ccp_show_queue_hw(qp); } DB_SHOW_COMMAND(ccp, db_show_ccp) { struct ccp_softc *sc; unsigned unit, qindex; if (!have_addr) goto usage; unit = (unsigned)addr; sc = devclass_get_softc(ccp_devclass, unit); if (sc == NULL) { db_printf("No such device ccp%u\n", unit); goto usage; } if (count == -1) { db_show_ccp_sc(sc); return; } qindex = (unsigned)count; if (qindex >= nitems(sc->queues)) { db_printf("No such queue %u\n", qindex); goto usage; } db_show_ccp_qp(&sc->queues[qindex]); return; usage: db_printf("usage: show ccp [,]\n"); return; } #endif /* DDB */ Index: head/sys/crypto/ccp/ccp.h =================================================================== --- head/sys/crypto/ccp/ccp.h (revision 336438) +++ head/sys/crypto/ccp/ccp.h (revision 336439) @@ -1,261 +1,259 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2017 Conrad Meyer * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #pragma once /* * Keccak SHAKE128 (if supported by the device?) uses a 1344 bit block. * SHA3-224 is the next largest block size, at 1152 bits. However, crypto(4) * doesn't support any SHA3 hash, so SHA2 is the constraint: */ #define CCP_HASH_MAX_BLOCK_SIZE (SHA2_512_BLOCK_LEN) #define CCP_AES_MAX_KEY_LEN (AES_XTS_MAX_KEY) #define CCP_MAX_CRYPTO_IV_LEN 32 /* GCM IV + GHASH context */ #define MAX_HW_QUEUES 5 #define MAX_LSB_REGIONS 8 #ifndef __must_check #define __must_check __attribute__((__warn_unused_result__)) #endif /* * Internal data structures. */ enum sha_version { SHA1, #if 0 SHA2_224, #endif SHA2_256, SHA2_384, SHA2_512 }; struct ccp_session_hmac { struct auth_hash *auth_hash; int hash_len; unsigned int partial_digest_len; unsigned int auth_mode; unsigned int mk_size; char ipad[CCP_HASH_MAX_BLOCK_SIZE]; char opad[CCP_HASH_MAX_BLOCK_SIZE]; }; struct ccp_session_gmac { int hash_len; char final_block[GMAC_BLOCK_LEN]; }; struct ccp_session_blkcipher { unsigned cipher_mode; unsigned cipher_type; unsigned key_len; unsigned iv_len; char enckey[CCP_AES_MAX_KEY_LEN]; char iv[CCP_MAX_CRYPTO_IV_LEN]; }; struct ccp_session { bool active : 1; bool cipher_first : 1; int pending; enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode; unsigned queue; union { struct ccp_session_hmac hmac; struct ccp_session_gmac gmac; }; struct ccp_session_blkcipher blkcipher; }; struct ccp_softc; struct ccp_queue { struct mtx cq_lock; unsigned cq_qindex; struct ccp_softc *cq_softc; /* Host memory and tracking structures for descriptor ring. */ bus_dma_tag_t ring_desc_tag; bus_dmamap_t ring_desc_map; struct ccp_desc *desc_ring; bus_addr_t desc_ring_bus_addr; /* Callbacks and arguments ring; indices correspond to above ring. */ struct ccp_completion_ctx *completions_ring; uint32_t qcontrol; /* Cached register value */ unsigned lsb_mask; /* LSBs available to queue */ int private_lsb; /* Reserved LSB #, or -1 */ unsigned cq_head; unsigned cq_tail; unsigned cq_acq_tail; bool cq_waiting; /* Thread waiting for space */ struct sglist *cq_sg_crp; struct sglist *cq_sg_ulptx; struct sglist *cq_sg_dst; }; struct ccp_completion_ctx { void (*callback_fn)(struct ccp_queue *qp, struct ccp_session *s, void *arg, int error); void *callback_arg; struct ccp_session *session; }; struct ccp_softc { device_t dev; int32_t cid; - struct ccp_session *sessions; - int nsessions; struct mtx lock; bool detaching; unsigned ring_size_order; /* * Each command queue is either public or private. "Private" * (PSP-only) by default. PSP grants access to some queues to host via * QMR (Queue Mask Register). Set bits are host accessible. 
*/ uint8_t valid_queues; uint8_t hw_version; uint8_t num_queues; uint16_t hw_features; uint16_t num_lsb_entries; /* Primary BAR (RID 2) used for register access */ bus_space_tag_t pci_bus_tag; bus_space_handle_t pci_bus_handle; int pci_resource_id; struct resource *pci_resource; /* Secondary BAR (RID 5) apparently used for MSI-X */ int pci_resource_id_msix; struct resource *pci_resource_msix; /* Interrupt resources */ void *intr_tag[2]; struct resource *intr_res[2]; unsigned intr_count; struct ccp_queue queues[MAX_HW_QUEUES]; }; /* Internal globals */ SYSCTL_DECL(_hw_ccp); MALLOC_DECLARE(M_CCP); extern bool g_debug_print; extern struct ccp_softc *g_ccp_softc; /* * Debug macros. */ #define DPRINTF(dev, ...) do { \ if (!g_debug_print) \ break; \ if ((dev) != NULL) \ device_printf((dev), "XXX " __VA_ARGS__); \ else \ printf("ccpXXX: " __VA_ARGS__); \ } while (0) #if 0 #define INSECURE_DEBUG(dev, ...) do { \ if (!g_debug_print) \ break; \ if ((dev) != NULL) \ device_printf((dev), "XXX " __VA_ARGS__); \ else \ printf("ccpXXX: " __VA_ARGS__); \ } while (0) #else #define INSECURE_DEBUG(dev, ...) #endif /* * Internal hardware manipulation routines. */ int ccp_hw_attach(device_t dev); void ccp_hw_detach(device_t dev); void ccp_queue_write_tail(struct ccp_queue *qp); #ifdef DDB void db_ccp_show_hw(struct ccp_softc *sc); void db_ccp_show_queue_hw(struct ccp_queue *qp); #endif /* * Internal hardware crypt-op submission routines. */ int ccp_authenc(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde) __must_check; int ccp_blkcipher(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp) __must_check; int ccp_gcm(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde) __must_check; int ccp_hmac(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp) __must_check; /* * Internal hardware TRNG read routine. */ u_int random_ccp_read(void *v, u_int c); /* XXX */ int ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags) __must_check; void ccp_queue_abort(struct ccp_queue *qp); void ccp_queue_release(struct ccp_queue *qp); /* * Internal inline routines. */ static inline unsigned ccp_queue_get_active(struct ccp_queue *qp) { struct ccp_softc *sc; sc = qp->cq_softc; return ((qp->cq_tail - qp->cq_head) & ((1 << sc->ring_size_order) - 1)); } static inline unsigned ccp_queue_get_ring_space(struct ccp_queue *qp) { struct ccp_softc *sc; sc = qp->cq_softc; return ((1 << sc->ring_size_order) - ccp_queue_get_active(qp) - 1); } Index: head/sys/crypto/via/padlock.c =================================================================== --- head/sys/crypto/via/padlock.c (revision 336438) +++ head/sys/crypto/via/padlock.c (revision 336439) @@ -1,426 +1,340 @@ /*- * Copyright (c) 2005-2008 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #if defined(__amd64__) || defined(__i386__) #include #include #include #include #endif #include #include #include #include #include "cryptodev_if.h" /* * Technical documentation about the PadLock engine can be found here: * * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf */ struct padlock_softc { int32_t sc_cid; - uint32_t sc_sid; - TAILQ_HEAD(padlock_sessions_head, padlock_session) sc_sessions; - struct rwlock sc_sessions_lock; }; -static int padlock_newsession(device_t, uint32_t *sidp, struct cryptoini *cri); -static int padlock_freesession(device_t, uint64_t tid); +static int padlock_newsession(device_t, crypto_session_t cses, struct cryptoini *cri); +static void padlock_freesession(device_t, crypto_session_t cses); static void padlock_freesession_one(struct padlock_softc *sc, - struct padlock_session *ses, int locked); + struct padlock_session *ses); static int padlock_process(device_t, struct cryptop *crp, int hint __unused); MALLOC_DEFINE(M_PADLOCK, "padlock_data", "PadLock Data"); static void padlock_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "padlock", -1) == NULL && BUS_ADD_CHILD(parent, 10, "padlock", -1) == 0) panic("padlock: could not attach"); } static int padlock_probe(device_t dev) { char capp[256]; #if defined(__amd64__) || defined(__i386__) /* If there is no AES support, we has nothing to do here. 
*/ if (!(via_feature_xcrypt & VIA_HAS_AES)) { device_printf(dev, "No ACE support.\n"); return (EINVAL); } strlcpy(capp, "AES-CBC", sizeof(capp)); #if 0 strlcat(capp, ",AES-EBC", sizeof(capp)); strlcat(capp, ",AES-CFB", sizeof(capp)); strlcat(capp, ",AES-OFB", sizeof(capp)); #endif if (via_feature_xcrypt & VIA_HAS_SHA) { strlcat(capp, ",SHA1", sizeof(capp)); strlcat(capp, ",SHA256", sizeof(capp)); } #if 0 if (via_feature_xcrypt & VIA_HAS_AESCTR) strlcat(capp, ",AES-CTR", sizeof(capp)); if (via_feature_xcrypt & VIA_HAS_MM) strlcat(capp, ",RSA", sizeof(capp)); #endif device_set_desc_copy(dev, capp); return (0); #else return (EINVAL); #endif } static int padlock_attach(device_t dev) { struct padlock_softc *sc = device_get_softc(dev); - TAILQ_INIT(&sc->sc_sessions); - sc->sc_sid = 1; - - sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); + sc->sc_cid = crypto_get_driverid(dev, sizeof(struct padlock_session), + CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "Could not get crypto driver id.\n"); return (ENOMEM); } - rw_init(&sc->sc_sessions_lock, "padlock_lock"); crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_RIPEMD160_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0); return (0); } static int padlock_detach(device_t dev) { struct padlock_softc *sc = device_get_softc(dev); - struct padlock_session *ses; - rw_wlock(&sc->sc_sessions_lock); - TAILQ_FOREACH(ses, &sc->sc_sessions, ses_next) { - if (ses->ses_used) { - rw_wunlock(&sc->sc_sessions_lock); - device_printf(dev, - "Cannot detach, sessions still active.\n"); - return (EBUSY); - } - } - while ((ses = TAILQ_FIRST(&sc->sc_sessions)) != NULL) { - TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next); - fpu_kern_free_ctx(ses->ses_fpu_ctx); - free(ses, M_PADLOCK); - } - rw_destroy(&sc->sc_sessions_lock); crypto_unregister_all(sc->sc_cid); return (0); } static int -padlock_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri) +padlock_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri) { struct padlock_softc *sc = device_get_softc(dev); struct padlock_session *ses = NULL; struct cryptoini *encini, *macini; struct thread *td; int error; - if (sidp == NULL || cri == NULL) + if (cri == NULL) return (EINVAL); encini = macini = NULL; for (; cri != NULL; cri = cri->cri_next) { switch (cri->cri_alg) { case CRYPTO_NULL_HMAC: case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_RIPEMD160_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: if (macini != NULL) return (EINVAL); macini = cri; break; case CRYPTO_AES_CBC: if (encini != NULL) return (EINVAL); encini = cri; break; default: return (EINVAL); } } /* * We only support HMAC algorithms to be able to work with * ipsec(4), so if we are asked only for authentication without * encryption, don't pretend we can accellerate it. */ if (encini == NULL) return (EINVAL); - /* - * Let's look for a free session structure. - */ - rw_wlock(&sc->sc_sessions_lock); - /* - * Free sessions goes first, so if first session is used, we need to - * allocate one. 
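[Editorial note] Padlock follows the same pattern, with one wrinkle: each session needs an FPU kernel context. Previously the context was created only when a session structure was first malloc'ed (with FPU_KERN_NOWAIT, so it could fail) and was preserved across the free-list recycling that the removed comment here describes. Reassembled from the hunks that follow, the context is now allocated in newsession against the framework-provided session and released in padlock_freesession_one(); dropping the NOWAIT flag lets the allocation sleep rather than return NULL, which is why the ENOMEM path disappears:

    /* In padlock_newsession(): per-session FPU context, allocated sleepably. */
    ses = crypto_get_driver_session(cses);
    ses->ses_fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL);

    /* Teardown no longer touches a session list or the softc lock. */
    static void
    padlock_freesession_one(struct padlock_softc *sc, struct padlock_session *ses)
    {
            padlock_hash_free(ses);
            fpu_kern_free_ctx(ses->ses_fpu_ctx);
    }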
- */ - ses = TAILQ_FIRST(&sc->sc_sessions); - if (ses == NULL || ses->ses_used) { - ses = malloc(sizeof(*ses), M_PADLOCK, M_NOWAIT | M_ZERO); - if (ses == NULL) { - rw_wunlock(&sc->sc_sessions_lock); - return (ENOMEM); - } - ses->ses_fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL | - FPU_KERN_NOWAIT); - if (ses->ses_fpu_ctx == NULL) { - free(ses, M_PADLOCK); - rw_wunlock(&sc->sc_sessions_lock); - return (ENOMEM); - } - ses->ses_id = sc->sc_sid++; - } else { - TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next); - } - ses->ses_used = 1; - TAILQ_INSERT_TAIL(&sc->sc_sessions, ses, ses_next); - rw_wunlock(&sc->sc_sessions_lock); + ses = crypto_get_driver_session(cses); + ses->ses_fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL); error = padlock_cipher_setup(ses, encini); if (error != 0) { - padlock_freesession_one(sc, ses, 0); + padlock_freesession_one(sc, ses); return (error); } if (macini != NULL) { td = curthread; fpu_kern_enter(td, ses->ses_fpu_ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); error = padlock_hash_setup(ses, macini); fpu_kern_leave(td, ses->ses_fpu_ctx); if (error != 0) { - padlock_freesession_one(sc, ses, 0); + padlock_freesession_one(sc, ses); return (error); } } - *sidp = ses->ses_id; return (0); } static void -padlock_freesession_one(struct padlock_softc *sc, struct padlock_session *ses, - int locked) +padlock_freesession_one(struct padlock_softc *sc, struct padlock_session *ses) { - struct fpu_kern_ctx *ctx; - uint32_t sid = ses->ses_id; - if (!locked) - rw_wlock(&sc->sc_sessions_lock); - TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next); padlock_hash_free(ses); - ctx = ses->ses_fpu_ctx; - bzero(ses, sizeof(*ses)); - ses->ses_used = 0; - ses->ses_id = sid; - ses->ses_fpu_ctx = ctx; - TAILQ_INSERT_HEAD(&sc->sc_sessions, ses, ses_next); - if (!locked) - rw_wunlock(&sc->sc_sessions_lock); + fpu_kern_free_ctx(ses->ses_fpu_ctx); } -static int -padlock_freesession(device_t dev, uint64_t tid) +static void +padlock_freesession(device_t dev, crypto_session_t cses) { struct padlock_softc *sc = device_get_softc(dev); struct padlock_session *ses; - uint32_t sid = ((uint32_t)tid) & 0xffffffff; - rw_wlock(&sc->sc_sessions_lock); - TAILQ_FOREACH_REVERSE(ses, &sc->sc_sessions, padlock_sessions_head, - ses_next) { - if (ses->ses_id == sid) - break; - } - if (ses == NULL) { - rw_wunlock(&sc->sc_sessions_lock); - return (EINVAL); - } - padlock_freesession_one(sc, ses, 1); - rw_wunlock(&sc->sc_sessions_lock); - return (0); + ses = crypto_get_driver_session(cses); + padlock_freesession_one(sc, ses); } static int padlock_process(device_t dev, struct cryptop *crp, int hint __unused) { - struct padlock_softc *sc = device_get_softc(dev); struct padlock_session *ses = NULL; struct cryptodesc *crd, *enccrd, *maccrd; int error = 0; enccrd = maccrd = NULL; /* Sanity check. 
*/ if (crp == NULL) return (EINVAL); if (crp->crp_callback == NULL || crp->crp_desc == NULL) { error = EINVAL; goto out; } for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) { switch (crd->crd_alg) { case CRYPTO_NULL_HMAC: case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_RIPEMD160_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: if (maccrd != NULL) { error = EINVAL; goto out; } maccrd = crd; break; case CRYPTO_AES_CBC: if (enccrd != NULL) { error = EINVAL; goto out; } enccrd = crd; break; default: return (EINVAL); } } if (enccrd == NULL || (enccrd->crd_len % AES_BLOCK_LEN) != 0) { error = EINVAL; goto out; } - rw_rlock(&sc->sc_sessions_lock); - TAILQ_FOREACH_REVERSE(ses, &sc->sc_sessions, padlock_sessions_head, - ses_next) { - if (ses->ses_id == (crp->crp_sid & 0xffffffff)) - break; - } - rw_runlock(&sc->sc_sessions_lock); - if (ses == NULL) { - error = EINVAL; - goto out; - } + ses = crypto_get_driver_session(crp->crp_session); /* Perform data authentication if requested before encryption. */ if (maccrd != NULL && maccrd->crd_next == enccrd) { error = padlock_hash_process(ses, maccrd, crp); if (error != 0) goto out; } error = padlock_cipher_process(ses, enccrd, crp); if (error != 0) goto out; /* Perform data authentication if requested after encryption. */ if (maccrd != NULL && enccrd->crd_next == maccrd) { error = padlock_hash_process(ses, maccrd, crp); if (error != 0) goto out; } out: #if 0 /* * This code is not necessary, because contexts will be freed on next * padlock_setup_mackey() call or at padlock_freesession() call. */ if (ses != NULL && maccrd != NULL && (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) { padlock_free_ctx(ses->ses_axf, ses->ses_ictx); padlock_free_ctx(ses->ses_axf, ses->ses_octx); } #endif crp->crp_etype = error; crypto_done(crp); return (error); } static device_method_t padlock_methods[] = { DEVMETHOD(device_identify, padlock_identify), DEVMETHOD(device_probe, padlock_probe), DEVMETHOD(device_attach, padlock_attach), DEVMETHOD(device_detach, padlock_detach), DEVMETHOD(cryptodev_newsession, padlock_newsession), DEVMETHOD(cryptodev_freesession,padlock_freesession), DEVMETHOD(cryptodev_process, padlock_process), {0, 0}, }; static driver_t padlock_driver = { "padlock", padlock_methods, sizeof(struct padlock_softc), }; static devclass_t padlock_devclass; /* XXX where to attach */ DRIVER_MODULE(padlock, nexus, padlock_driver, padlock_devclass, 0, 0); MODULE_VERSION(padlock, 1); MODULE_DEPEND(padlock, crypto, 1, 1, 1); Index: head/sys/crypto/via/padlock.h =================================================================== --- head/sys/crypto/via/padlock.h (revision 336438) +++ head/sys/crypto/via/padlock.h (revision 336439) @@ -1,94 +1,91 @@ /*- * Copyright (c) 2005-2006 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
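The padlock conversion above shows the pattern this change applies to every driver: the per-session structure size is passed to crypto_get_driverid(), the framework allocates that storage for each session, and the driver fetches it with crypto_get_driver_session() instead of keeping its own session list and IDs. A minimal sketch of the pattern, assuming a hypothetical "foo" driver (the softc and session fields are illustrative, not part of this change):

struct foo_session {
	uint8_t		fs_key[32];	/* per-session state lives here */
	int		fs_klen;
};

static int
foo_attach(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);

	/* Tell the framework how much per-session storage to allocate. */
	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct foo_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0)
		return (ENOMEM);
	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	return (0);
}

static int
foo_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
{
	struct foo_session *ses;

	if (cri == NULL || cri->cri_klen / 8 > sizeof(ses->fs_key))
		return (EINVAL);
	/* Storage was allocated by the framework; no list, no SID. */
	ses = crypto_get_driver_session(cses);
	ses->fs_klen = cri->cri_klen / 8;
	memcpy(ses->fs_key, cri->cri_key, ses->fs_klen);
	return (0);
}

static void
foo_freesession(device_t dev, crypto_session_t cses)
{
	/*
	 * Only driver-owned resources are released here; the framework
	 * frees the session storage itself, so this method returns void.
	 */
}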
* * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _PADLOCK_H_ #define _PADLOCK_H_ #include #include #if defined(__i386__) #include #elif defined(__amd64__) #include #endif union padlock_cw { uint64_t raw; struct { u_int round_count : 4; u_int algorithm_type : 3; u_int key_generation : 1; u_int intermediate : 1; u_int direction : 1; u_int key_size : 2; u_int filler0 : 20; u_int filler1 : 32; u_int filler2 : 32; u_int filler3 : 32; } __field; }; #define cw_round_count __field.round_count #define cw_algorithm_type __field.algorithm_type #define cw_key_generation __field.key_generation #define cw_intermediate __field.intermediate #define cw_direction __field.direction #define cw_key_size __field.key_size #define cw_filler0 __field.filler0 #define cw_filler1 __field.filler1 #define cw_filler2 __field.filler2 #define cw_filler3 __field.filler3 struct padlock_session { union padlock_cw ses_cw __aligned(16); uint32_t ses_ekey[4 * (RIJNDAEL_MAXNR + 1) + 4] __aligned(16); /* 128 bit aligned */ uint32_t ses_dkey[4 * (RIJNDAEL_MAXNR + 1) + 4] __aligned(16); /* 128 bit aligned */ uint8_t ses_iv[16] __aligned(16); /* 128 bit aligned */ struct auth_hash *ses_axf; uint8_t *ses_ictx; uint8_t *ses_octx; int ses_mlen; - int ses_used; - uint32_t ses_id; - TAILQ_ENTRY(padlock_session) ses_next; struct fpu_kern_ctx *ses_fpu_ctx; }; #define PADLOCK_ALIGN(p) (void *)(roundup2((uintptr_t)(p), 16)) int padlock_cipher_setup(struct padlock_session *ses, struct cryptoini *encini); int padlock_cipher_process(struct padlock_session *ses, struct cryptodesc *enccrd, struct cryptop *crp); int padlock_hash_setup(struct padlock_session *ses, struct cryptoini *macini); int padlock_hash_process(struct padlock_session *ses, struct cryptodesc *maccrd, struct cryptop *crp); void padlock_hash_free(struct padlock_session *ses); #endif /* !_PADLOCK_H_ */ Index: head/sys/dev/cesa/cesa.c =================================================================== --- head/sys/dev/cesa/cesa.c (revision 336438) +++ head/sys/dev/cesa/cesa.c (revision 336439) @@ -1,1894 +1,1829 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2009-2011 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
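In the cesa conversion that follows, the cryptodev_freesession method is removed outright: with session storage owned by the framework, a driver that has nothing of its own to tear down no longer needs the method at all (padlock keeps a void version only to release its FPU context and hash state). The method table of the hypothetical "foo" driver sketched above would then shrink to the following; foo_process is sketched after the cesa.c hunk below:

static device_method_t foo_methods[] = {
	DEVMETHOD(device_attach,	foo_attach),
	DEVMETHOD(cryptodev_newsession,	foo_newsession),
	/* cryptodev_freesession omitted: nothing driver-specific to free. */
	DEVMETHOD(cryptodev_process,	foo_process),
	DEVMETHOD_END
};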
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * CESA SRAM Memory Map: * * +------------------------+ <= sc->sc_sram_base_va + CESA_SRAM_SIZE * | | * | DATA | * | | * +------------------------+ <= sc->sc_sram_base_va + CESA_DATA(0) * | struct cesa_sa_data | * +------------------------+ * | struct cesa_sa_hdesc | * +------------------------+ <= sc->sc_sram_base_va */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #include "cesa.h" static int cesa_probe(device_t); static int cesa_attach(device_t); static int cesa_attach_late(device_t); static int cesa_detach(device_t); static void cesa_intr(void *); -static int cesa_newsession(device_t, u_int32_t *, struct cryptoini *); -static int cesa_freesession(device_t, u_int64_t); +static int cesa_newsession(device_t, crypto_session_t, struct cryptoini *); static int cesa_process(device_t, struct cryptop *, int); static struct resource_spec cesa_res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_MEMORY, 1, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static device_method_t cesa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cesa_probe), DEVMETHOD(device_attach, cesa_attach), DEVMETHOD(device_detach, cesa_detach), /* Crypto device methods */ DEVMETHOD(cryptodev_newsession, cesa_newsession), - DEVMETHOD(cryptodev_freesession,cesa_freesession), DEVMETHOD(cryptodev_process, cesa_process), DEVMETHOD_END }; static driver_t cesa_driver = { "cesa", cesa_methods, sizeof (struct cesa_softc) }; static devclass_t cesa_devclass; DRIVER_MODULE(cesa, simplebus, cesa_driver, cesa_devclass, 0, 0); MODULE_DEPEND(cesa, crypto, 1, 1, 1); static void cesa_dump_cshd(struct cesa_softc *sc, struct cesa_sa_hdesc *cshd) { #ifdef DEBUG device_t dev; dev = sc->sc_dev; device_printf(dev, "CESA SA Hardware Descriptor:\n"); device_printf(dev, "\t\tconfig: 0x%08X\n", cshd->cshd_config); device_printf(dev, "\t\te_src: 0x%08X\n", cshd->cshd_enc_src); device_printf(dev, "\t\te_dst: 0x%08X\n", cshd->cshd_enc_dst); device_printf(dev, "\t\te_dlen: 0x%08X\n", cshd->cshd_enc_dlen); device_printf(dev, "\t\te_key: 0x%08X\n", cshd->cshd_enc_key); device_printf(dev, "\t\te_iv_1: 0x%08X\n", cshd->cshd_enc_iv); device_printf(dev, "\t\te_iv_2: 0x%08X\n", cshd->cshd_enc_iv_buf); device_printf(dev, "\t\tm_src: 0x%08X\n", cshd->cshd_mac_src); device_printf(dev, "\t\tm_dst: 0x%08X\n", cshd->cshd_mac_dst); device_printf(dev, "\t\tm_dlen: 0x%08X\n", cshd->cshd_mac_dlen); device_printf(dev, "\t\tm_tlen: 0x%08X\n", cshd->cshd_mac_total_dlen); device_printf(dev, "\t\tm_iv_i: 0x%08X\n", cshd->cshd_mac_iv_in); 
device_printf(dev, "\t\tm_iv_o: 0x%08X\n", cshd->cshd_mac_iv_out); #endif } static void cesa_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct cesa_dma_mem *cdm; if (error) return; KASSERT(nseg == 1, ("Got wrong number of DMA segments, should be 1.")); cdm = arg; cdm->cdm_paddr = segs->ds_addr; } static int cesa_alloc_dma_mem(struct cesa_softc *sc, struct cesa_dma_mem *cdm, bus_size_t size) { int error; KASSERT(cdm->cdm_vaddr == NULL, ("%s(): DMA memory descriptor in use.", __func__)); error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ PAGE_SIZE, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ size, 1, /* maxsize, nsegments */ size, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &cdm->cdm_tag); /* dmat */ if (error) { device_printf(sc->sc_dev, "failed to allocate busdma tag, error" " %i!\n", error); goto err1; } error = bus_dmamem_alloc(cdm->cdm_tag, &cdm->cdm_vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &cdm->cdm_map); if (error) { device_printf(sc->sc_dev, "failed to allocate DMA safe" " memory, error %i!\n", error); goto err2; } error = bus_dmamap_load(cdm->cdm_tag, cdm->cdm_map, cdm->cdm_vaddr, size, cesa_alloc_dma_mem_cb, cdm, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "cannot get address of the DMA" " memory, error %i\n", error); goto err3; } return (0); err3: bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map); err2: bus_dma_tag_destroy(cdm->cdm_tag); err1: cdm->cdm_vaddr = NULL; return (error); } static void cesa_free_dma_mem(struct cesa_dma_mem *cdm) { bus_dmamap_unload(cdm->cdm_tag, cdm->cdm_map); bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map); bus_dma_tag_destroy(cdm->cdm_tag); cdm->cdm_vaddr = NULL; } static void cesa_sync_dma_mem(struct cesa_dma_mem *cdm, bus_dmasync_op_t op) { /* Sync only if dma memory is valid */ if (cdm->cdm_vaddr != NULL) bus_dmamap_sync(cdm->cdm_tag, cdm->cdm_map, op); } static void cesa_sync_desc(struct cesa_softc *sc, bus_dmasync_op_t op) { cesa_sync_dma_mem(&sc->sc_tdesc_cdm, op); cesa_sync_dma_mem(&sc->sc_sdesc_cdm, op); cesa_sync_dma_mem(&sc->sc_requests_cdm, op); } -static struct cesa_session * -cesa_alloc_session(struct cesa_softc *sc) -{ - struct cesa_session *cs; - - CESA_GENERIC_ALLOC_LOCKED(sc, cs, sessions); - - return (cs); -} - -static struct cesa_session * -cesa_get_session(struct cesa_softc *sc, uint32_t sid) -{ - - if (sid >= CESA_SESSIONS) - return (NULL); - - return (&sc->sc_sessions[sid]); -} - -static void -cesa_free_session(struct cesa_softc *sc, struct cesa_session *cs) -{ - - CESA_GENERIC_FREE_LOCKED(sc, cs, sessions); -} - static struct cesa_request * cesa_alloc_request(struct cesa_softc *sc) { struct cesa_request *cr; CESA_GENERIC_ALLOC_LOCKED(sc, cr, requests); if (!cr) return (NULL); STAILQ_INIT(&cr->cr_tdesc); STAILQ_INIT(&cr->cr_sdesc); return (cr); } static void cesa_free_request(struct cesa_softc *sc, struct cesa_request *cr) { /* Free TDMA descriptors assigned to this request */ CESA_LOCK(sc, tdesc); STAILQ_CONCAT(&sc->sc_free_tdesc, &cr->cr_tdesc); CESA_UNLOCK(sc, tdesc); /* Free SA descriptors assigned to this request */ CESA_LOCK(sc, sdesc); STAILQ_CONCAT(&sc->sc_free_sdesc, &cr->cr_sdesc); CESA_UNLOCK(sc, sdesc); /* Unload DMA memory associated with request */ if (cr->cr_dmap_loaded) { bus_dmamap_unload(sc->sc_data_dtag, cr->cr_dmap); cr->cr_dmap_loaded = 0; } CESA_GENERIC_FREE_LOCKED(sc, cr, requests); } static void 
cesa_enqueue_request(struct cesa_softc *sc, struct cesa_request *cr) { CESA_LOCK(sc, requests); STAILQ_INSERT_TAIL(&sc->sc_ready_requests, cr, cr_stq); CESA_UNLOCK(sc, requests); } static struct cesa_tdma_desc * cesa_alloc_tdesc(struct cesa_softc *sc) { struct cesa_tdma_desc *ctd; CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc); if (!ctd) device_printf(sc->sc_dev, "TDMA descriptors pool exhaused. " "Consider increasing CESA_TDMA_DESCRIPTORS.\n"); return (ctd); } static struct cesa_sa_desc * cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr) { struct cesa_sa_desc *csd; CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc); if (!csd) { device_printf(sc->sc_dev, "SA descriptors pool exhaused. " "Consider increasing CESA_SA_DESCRIPTORS.\n"); return (NULL); } STAILQ_INSERT_TAIL(&cr->cr_sdesc, csd, csd_stq); /* Fill-in SA descriptor with default values */ csd->csd_cshd->cshd_enc_key = CESA_SA_DATA(csd_key); csd->csd_cshd->cshd_enc_iv = CESA_SA_DATA(csd_iv); csd->csd_cshd->cshd_enc_iv_buf = CESA_SA_DATA(csd_iv); csd->csd_cshd->cshd_enc_src = 0; csd->csd_cshd->cshd_enc_dst = 0; csd->csd_cshd->cshd_enc_dlen = 0; csd->csd_cshd->cshd_mac_dst = CESA_SA_DATA(csd_hash); csd->csd_cshd->cshd_mac_iv_in = CESA_SA_DATA(csd_hiv_in); csd->csd_cshd->cshd_mac_iv_out = CESA_SA_DATA(csd_hiv_out); csd->csd_cshd->cshd_mac_src = 0; csd->csd_cshd->cshd_mac_dlen = 0; return (csd); } static struct cesa_tdma_desc * cesa_tdma_copy(struct cesa_softc *sc, bus_addr_t dst, bus_addr_t src, bus_size_t size) { struct cesa_tdma_desc *ctd; ctd = cesa_alloc_tdesc(sc); if (!ctd) return (NULL); ctd->ctd_cthd->cthd_dst = dst; ctd->ctd_cthd->cthd_src = src; ctd->ctd_cthd->cthd_byte_count = size; /* Handle special control packet */ if (size != 0) ctd->ctd_cthd->cthd_flags = CESA_CTHD_OWNED; else ctd->ctd_cthd->cthd_flags = 0; return (ctd); } static struct cesa_tdma_desc * cesa_tdma_copyin_sa_data(struct cesa_softc *sc, struct cesa_request *cr) { return (cesa_tdma_copy(sc, sc->sc_sram_base_pa + sizeof(struct cesa_sa_hdesc), cr->cr_csd_paddr, sizeof(struct cesa_sa_data))); } static struct cesa_tdma_desc * cesa_tdma_copyout_sa_data(struct cesa_softc *sc, struct cesa_request *cr) { return (cesa_tdma_copy(sc, cr->cr_csd_paddr, sc->sc_sram_base_pa + sizeof(struct cesa_sa_hdesc), sizeof(struct cesa_sa_data))); } static struct cesa_tdma_desc * cesa_tdma_copy_sdesc(struct cesa_softc *sc, struct cesa_sa_desc *csd) { return (cesa_tdma_copy(sc, sc->sc_sram_base_pa, csd->csd_cshd_paddr, sizeof(struct cesa_sa_hdesc))); } static void cesa_append_tdesc(struct cesa_request *cr, struct cesa_tdma_desc *ctd) { struct cesa_tdma_desc *ctd_prev; if (!STAILQ_EMPTY(&cr->cr_tdesc)) { ctd_prev = STAILQ_LAST(&cr->cr_tdesc, cesa_tdma_desc, ctd_stq); ctd_prev->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr; } ctd->ctd_cthd->cthd_next = 0; STAILQ_INSERT_TAIL(&cr->cr_tdesc, ctd, ctd_stq); } static int cesa_append_packet(struct cesa_softc *sc, struct cesa_request *cr, struct cesa_packet *cp, struct cesa_sa_desc *csd) { struct cesa_tdma_desc *ctd, *tmp; /* Copy SA descriptor for this packet */ ctd = cesa_tdma_copy_sdesc(sc, csd); if (!ctd) return (ENOMEM); cesa_append_tdesc(cr, ctd); /* Copy data to be processed */ STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyin, ctd_stq, tmp) cesa_append_tdesc(cr, ctd); STAILQ_INIT(&cp->cp_copyin); /* Insert control descriptor */ ctd = cesa_tdma_copy(sc, 0, 0, 0); if (!ctd) return (ENOMEM); cesa_append_tdesc(cr, ctd); /* Copy back results */ STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyout, ctd_stq, tmp) cesa_append_tdesc(cr, ctd); 
STAILQ_INIT(&cp->cp_copyout); return (0); } static int cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen) { uint8_t ipad[CESA_MAX_HMAC_BLOCK_LEN]; uint8_t opad[CESA_MAX_HMAC_BLOCK_LEN]; SHA1_CTX sha1ctx; SHA256_CTX sha256ctx; MD5_CTX md5ctx; uint32_t *hout; uint32_t *hin; int i; memset(ipad, HMAC_IPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN); memset(opad, HMAC_OPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN); for (i = 0; i < mklen; i++) { ipad[i] ^= mkey[i]; opad[i] ^= mkey[i]; } hin = (uint32_t *)cs->cs_hiv_in; hout = (uint32_t *)cs->cs_hiv_out; switch (alg) { case CRYPTO_MD5_HMAC: MD5Init(&md5ctx); MD5Update(&md5ctx, ipad, MD5_BLOCK_LEN); memcpy(hin, md5ctx.state, sizeof(md5ctx.state)); MD5Init(&md5ctx); MD5Update(&md5ctx, opad, MD5_BLOCK_LEN); memcpy(hout, md5ctx.state, sizeof(md5ctx.state)); break; case CRYPTO_SHA1_HMAC: SHA1Init(&sha1ctx); SHA1Update(&sha1ctx, ipad, SHA1_BLOCK_LEN); memcpy(hin, sha1ctx.h.b32, sizeof(sha1ctx.h.b32)); SHA1Init(&sha1ctx); SHA1Update(&sha1ctx, opad, SHA1_BLOCK_LEN); memcpy(hout, sha1ctx.h.b32, sizeof(sha1ctx.h.b32)); break; case CRYPTO_SHA2_256_HMAC: SHA256_Init(&sha256ctx); SHA256_Update(&sha256ctx, ipad, SHA2_256_BLOCK_LEN); memcpy(hin, sha256ctx.state, sizeof(sha256ctx.state)); SHA256_Init(&sha256ctx); SHA256_Update(&sha256ctx, opad, SHA2_256_BLOCK_LEN); memcpy(hout, sha256ctx.state, sizeof(sha256ctx.state)); break; default: return (EINVAL); } for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) { hin[i] = htobe32(hin[i]); hout[i] = htobe32(hout[i]); } return (0); } static int cesa_prep_aes_key(struct cesa_session *cs) { uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)]; uint32_t *dkey; int i; rijndaelKeySetupEnc(ek, cs->cs_key, cs->cs_klen * 8); cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK; dkey = (uint32_t *)cs->cs_aes_dkey; switch (cs->cs_klen) { case 16: cs->cs_config |= CESA_CSH_AES_KLEN_128; for (i = 0; i < 4; i++) *dkey++ = htobe32(ek[4 * 10 + i]); break; case 24: cs->cs_config |= CESA_CSH_AES_KLEN_192; for (i = 0; i < 4; i++) *dkey++ = htobe32(ek[4 * 12 + i]); for (i = 0; i < 2; i++) *dkey++ = htobe32(ek[4 * 11 + 2 + i]); break; case 32: cs->cs_config |= CESA_CSH_AES_KLEN_256; for (i = 0; i < 4; i++) *dkey++ = htobe32(ek[4 * 14 + i]); for (i = 0; i < 4; i++) *dkey++ = htobe32(ek[4 * 13 + i]); break; default: return (EINVAL); } return (0); } static int cesa_is_hash(int alg) { switch (alg) { case CRYPTO_MD5: case CRYPTO_MD5_HMAC: case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: return (1); default: return (0); } } static void cesa_start_packet(struct cesa_packet *cp, unsigned int size) { cp->cp_size = size; cp->cp_offset = 0; STAILQ_INIT(&cp->cp_copyin); STAILQ_INIT(&cp->cp_copyout); } static int cesa_fill_packet(struct cesa_softc *sc, struct cesa_packet *cp, bus_dma_segment_t *seg) { struct cesa_tdma_desc *ctd; unsigned int bsize; /* Calculate size of block copy */ bsize = MIN(seg->ds_len, cp->cp_size - cp->cp_offset); if (bsize > 0) { ctd = cesa_tdma_copy(sc, sc->sc_sram_base_pa + CESA_DATA(cp->cp_offset), seg->ds_addr, bsize); if (!ctd) return (-ENOMEM); STAILQ_INSERT_TAIL(&cp->cp_copyin, ctd, ctd_stq); ctd = cesa_tdma_copy(sc, seg->ds_addr, sc->sc_sram_base_pa + CESA_DATA(cp->cp_offset), bsize); if (!ctd) return (-ENOMEM); STAILQ_INSERT_TAIL(&cp->cp_copyout, ctd, ctd_stq); seg->ds_len -= bsize; seg->ds_addr += bsize; cp->cp_offset += bsize; } return (bsize); } static void cesa_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { unsigned int mpsize, fragmented; unsigned int mlen, mskip, tmlen; struct 
cesa_chain_info *cci; unsigned int elen, eskip; unsigned int skip, len; struct cesa_sa_desc *csd; struct cesa_request *cr; struct cesa_softc *sc; struct cesa_packet cp; bus_dma_segment_t seg; uint32_t config; int size; cci = arg; sc = cci->cci_sc; cr = cci->cci_cr; if (error) { cci->cci_error = error; return; } elen = cci->cci_enc ? cci->cci_enc->crd_len : 0; eskip = cci->cci_enc ? cci->cci_enc->crd_skip : 0; mlen = cci->cci_mac ? cci->cci_mac->crd_len : 0; mskip = cci->cci_mac ? cci->cci_mac->crd_skip : 0; if (elen && mlen && ((eskip > mskip && ((eskip - mskip) & (cr->cr_cs->cs_ivlen - 1))) || (mskip > eskip && ((mskip - eskip) & (cr->cr_cs->cs_mblen - 1))) || (eskip > (mskip + mlen)) || (mskip > (eskip + elen)))) { /* * Data alignment in the request does not meet CESA requiremnts * for combined encryption/decryption and hashing. We have to * split the request to separate operations and process them * one by one. */ config = cci->cci_config; if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) { config &= ~CESA_CSHD_OP_MASK; cci->cci_config = config | CESA_CSHD_MAC; cci->cci_enc = NULL; cci->cci_mac = cr->cr_mac; cesa_create_chain_cb(cci, segs, nseg, cci->cci_error); cci->cci_config = config | CESA_CSHD_ENC; cci->cci_enc = cr->cr_enc; cci->cci_mac = NULL; cesa_create_chain_cb(cci, segs, nseg, cci->cci_error); } else { config &= ~CESA_CSHD_OP_MASK; cci->cci_config = config | CESA_CSHD_ENC; cci->cci_enc = cr->cr_enc; cci->cci_mac = NULL; cesa_create_chain_cb(cci, segs, nseg, cci->cci_error); cci->cci_config = config | CESA_CSHD_MAC; cci->cci_enc = NULL; cci->cci_mac = cr->cr_mac; cesa_create_chain_cb(cci, segs, nseg, cci->cci_error); } return; } tmlen = mlen; fragmented = 0; mpsize = CESA_MAX_PACKET_SIZE; mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1)); if (elen && mlen) { skip = MIN(eskip, mskip); len = MAX(elen + eskip, mlen + mskip) - skip; } else if (elen) { skip = eskip; len = elen; } else { skip = mskip; len = mlen; } /* Start first packet in chain */ cesa_start_packet(&cp, MIN(mpsize, len)); while (nseg-- && len > 0) { seg = *(segs++); /* * Skip data in buffer on which neither ENC nor MAC operation * is requested. */ if (skip > 0) { size = MIN(skip, seg.ds_len); skip -= size; seg.ds_addr += size; seg.ds_len -= size; if (eskip > 0) eskip -= size; if (mskip > 0) mskip -= size; if (seg.ds_len == 0) continue; } while (1) { /* * Fill in current packet with data. Break if there is * no more data in current DMA segment or an error * occurred. */ size = cesa_fill_packet(sc, &cp, &seg); if (size <= 0) { error = -size; break; } len -= size; /* If packet is full, append it to the chain */ if (cp.cp_size == cp.cp_offset) { csd = cesa_alloc_sdesc(sc, cr); if (!csd) { error = ENOMEM; break; } /* Create SA descriptor for this packet */ csd->csd_cshd->cshd_config = cci->cci_config; csd->csd_cshd->cshd_mac_total_dlen = tmlen; /* * Enable fragmentation if request will not fit * into one packet. 
*/ if (len > 0) { if (!fragmented) { fragmented = 1; csd->csd_cshd->cshd_config |= CESA_CSHD_FRAG_FIRST; } else csd->csd_cshd->cshd_config |= CESA_CSHD_FRAG_MIDDLE; } else if (fragmented) csd->csd_cshd->cshd_config |= CESA_CSHD_FRAG_LAST; if (eskip < cp.cp_size && elen > 0) { csd->csd_cshd->cshd_enc_src = CESA_DATA(eskip); csd->csd_cshd->cshd_enc_dst = CESA_DATA(eskip); csd->csd_cshd->cshd_enc_dlen = MIN(elen, cp.cp_size - eskip); } if (mskip < cp.cp_size && mlen > 0) { csd->csd_cshd->cshd_mac_src = CESA_DATA(mskip); csd->csd_cshd->cshd_mac_dlen = MIN(mlen, cp.cp_size - mskip); } elen -= csd->csd_cshd->cshd_enc_dlen; eskip -= MIN(eskip, cp.cp_size); mlen -= csd->csd_cshd->cshd_mac_dlen; mskip -= MIN(mskip, cp.cp_size); cesa_dump_cshd(sc, csd->csd_cshd); /* Append packet to the request */ error = cesa_append_packet(sc, cr, &cp, csd); if (error) break; /* Start a new packet, as current is full */ cesa_start_packet(&cp, MIN(mpsize, len)); } } if (error) break; } if (error) { /* * Move all allocated resources to the request. They will be * freed later. */ STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyin); STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyout); cci->cci_error = error; } } static void cesa_create_chain_cb2(void *arg, bus_dma_segment_t *segs, int nseg, bus_size_t size, int error) { cesa_create_chain_cb(arg, segs, nseg, error); } static int cesa_create_chain(struct cesa_softc *sc, struct cesa_request *cr) { struct cesa_chain_info cci; struct cesa_tdma_desc *ctd; uint32_t config; int error; error = 0; CESA_LOCK_ASSERT(sc, sessions); /* Create request metadata */ if (cr->cr_enc) { if (cr->cr_enc->crd_alg == CRYPTO_AES_CBC && (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0) memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey, cr->cr_cs->cs_klen); else memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key, cr->cr_cs->cs_klen); } if (cr->cr_mac) { memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in, CESA_MAX_HASH_LEN); memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out, CESA_MAX_HASH_LEN); } ctd = cesa_tdma_copyin_sa_data(sc, cr); if (!ctd) return (ENOMEM); cesa_append_tdesc(cr, ctd); /* Prepare SA configuration */ config = cr->cr_cs->cs_config; if (cr->cr_enc && (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0) config |= CESA_CSHD_DECRYPT; if (cr->cr_enc && !cr->cr_mac) config |= CESA_CSHD_ENC; if (!cr->cr_enc && cr->cr_mac) config |= CESA_CSHD_MAC; if (cr->cr_enc && cr->cr_mac) config |= (config & CESA_CSHD_DECRYPT) ? 
CESA_CSHD_MAC_AND_ENC : CESA_CSHD_ENC_AND_MAC; /* Create data packets */ cci.cci_sc = sc; cci.cci_cr = cr; cci.cci_enc = cr->cr_enc; cci.cci_mac = cr->cr_mac; cci.cci_config = config; cci.cci_error = 0; if (cr->cr_crp->crp_flags & CRYPTO_F_IOV) error = bus_dmamap_load_uio(sc->sc_data_dtag, cr->cr_dmap, (struct uio *)cr->cr_crp->crp_buf, cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT); else if (cr->cr_crp->crp_flags & CRYPTO_F_IMBUF) error = bus_dmamap_load_mbuf(sc->sc_data_dtag, cr->cr_dmap, (struct mbuf *)cr->cr_crp->crp_buf, cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT); else error = bus_dmamap_load(sc->sc_data_dtag, cr->cr_dmap, cr->cr_crp->crp_buf, cr->cr_crp->crp_ilen, cesa_create_chain_cb, &cci, BUS_DMA_NOWAIT); if (!error) cr->cr_dmap_loaded = 1; if (cci.cci_error) error = cci.cci_error; if (error) return (error); /* Read back request metadata */ ctd = cesa_tdma_copyout_sa_data(sc, cr); if (!ctd) return (ENOMEM); cesa_append_tdesc(cr, ctd); return (0); } static void cesa_execute(struct cesa_softc *sc) { struct cesa_tdma_desc *prev_ctd, *ctd; struct cesa_request *prev_cr, *cr; CESA_LOCK(sc, requests); /* * If ready list is empty, there is nothing to execute. If queued list * is not empty, the hardware is busy and we cannot start another * execution. */ if (STAILQ_EMPTY(&sc->sc_ready_requests) || !STAILQ_EMPTY(&sc->sc_queued_requests)) { CESA_UNLOCK(sc, requests); return; } /* Move all ready requests to queued list */ STAILQ_CONCAT(&sc->sc_queued_requests, &sc->sc_ready_requests); STAILQ_INIT(&sc->sc_ready_requests); /* Create one execution chain from all requests on the list */ if (STAILQ_FIRST(&sc->sc_queued_requests) != STAILQ_LAST(&sc->sc_queued_requests, cesa_request, cr_stq)) { prev_cr = NULL; cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); STAILQ_FOREACH(cr, &sc->sc_queued_requests, cr_stq) { if (prev_cr) { ctd = STAILQ_FIRST(&cr->cr_tdesc); prev_ctd = STAILQ_LAST(&prev_cr->cr_tdesc, cesa_tdma_desc, ctd_stq); prev_ctd->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr; } prev_cr = cr; } cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } /* Start chain execution in hardware */ cr = STAILQ_FIRST(&sc->sc_queued_requests); ctd = STAILQ_FIRST(&cr->cr_tdesc); CESA_TDMA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr); if (sc->sc_soc_id == MV_DEV_88F6828 || sc->sc_soc_id == MV_DEV_88F6820 || sc->sc_soc_id == MV_DEV_88F6810) CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE | CESA_SA_CMD_SHA2); else CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE); CESA_UNLOCK(sc, requests); } static int cesa_setup_sram(struct cesa_softc *sc) { phandle_t sram_node; ihandle_t sram_ihandle; pcell_t sram_handle, sram_reg[2]; void *sram_va; int rv; rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "sram-handle", (void *)&sram_handle, sizeof(sram_handle)); if (rv <= 0) return (rv); sram_ihandle = (ihandle_t)sram_handle; sram_node = OF_instance_to_package(sram_ihandle); rv = OF_getencprop(sram_node, "reg", (void *)sram_reg, sizeof(sram_reg)); if (rv <= 0) return (rv); sc->sc_sram_base_pa = sram_reg[0]; /* Store SRAM size to be able to unmap in detach() */ sc->sc_sram_size = sram_reg[1]; if (sc->sc_soc_id != MV_DEV_88F6828 && sc->sc_soc_id != MV_DEV_88F6820 && sc->sc_soc_id != MV_DEV_88F6810) return (0); /* SRAM memory was not mapped in platform_sram_devmap(), map it now */ sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size); if (sram_va == NULL) return (ENOMEM); sc->sc_sram_base_va = (vm_offset_t)sram_va; return (0); } /* * Function: 
device_from_node * This function returns appropriate device_t to phandle_t * Parameters: * root - device where you want to start search * if you provide NULL here, function will take * "root0" device as root. * node - we are checking every device_t to be * appropriate with this. */ static device_t device_from_node(device_t root, phandle_t node) { device_t *children, retval; int nkid, i; /* Nothing matches no node */ if (node == -1) return (NULL); if (root == NULL) /* Get root of device tree */ if ((root = device_lookup_by_name("root0")) == NULL) return (NULL); if (device_get_children(root, &children, &nkid) != 0) return (NULL); retval = NULL; for (i = 0; i < nkid; i++) { /* Check if device and node matches */ if (OFW_BUS_GET_NODE(root, children[i]) == node) { retval = children[i]; break; } /* or go deeper */ if ((retval = device_from_node(children[i], node)) != NULL) break; } free(children, M_TEMP); return (retval); } static int cesa_setup_sram_armada(struct cesa_softc *sc) { phandle_t sram_node; ihandle_t sram_ihandle; pcell_t sram_handle[2]; void *sram_va; int rv, j; struct resource_list rl; struct resource_list_entry *rle; struct simplebus_softc *ssc; device_t sdev; /* Get refs to SRAMS from CESA node */ rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "marvell,crypto-srams", (void *)sram_handle, sizeof(sram_handle)); if (rv <= 0) return (rv); if (sc->sc_cesa_engine_id >= 2) return (ENXIO); /* Get SRAM node on the basis of sc_cesa_engine_id */ sram_ihandle = (ihandle_t)sram_handle[sc->sc_cesa_engine_id]; sram_node = OF_instance_to_package(sram_ihandle); /* Get device_t of simplebus (sram_node parent) */ sdev = device_from_node(NULL, OF_parent(sram_node)); if (!sdev) return (ENXIO); ssc = device_get_softc(sdev); resource_list_init(&rl); /* Parse reg property to resource list */ ofw_bus_reg_to_rl(sdev, sram_node, ssc->acells, ssc->scells, &rl); /* We expect only one resource */ rle = resource_list_find(&rl, SYS_RES_MEMORY, 0); if (rle == NULL) return (ENXIO); /* Remap through ranges property */ for (j = 0; j < ssc->nranges; j++) { if (rle->start >= ssc->ranges[j].bus && rle->end < ssc->ranges[j].bus + ssc->ranges[j].size) { rle->start -= ssc->ranges[j].bus; rle->start += ssc->ranges[j].host; rle->end -= ssc->ranges[j].bus; rle->end += ssc->ranges[j].host; } } sc->sc_sram_base_pa = rle->start; sc->sc_sram_size = rle->count; /* SRAM memory was not mapped in platform_sram_devmap(), map it now */ sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size); if (sram_va == NULL) return (ENOMEM); sc->sc_sram_base_va = (vm_offset_t)sram_va; return (0); } struct ofw_compat_data cesa_devices[] = { { "mrvl,cesa", (uintptr_t)true }, { "marvell,armada-38x-crypto", (uintptr_t)true }, { NULL, 0 } }; static int cesa_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, cesa_devices)->ocd_data) return (ENXIO); device_set_desc(dev, "Marvell Cryptographic Engine and Security " "Accelerator"); return (BUS_PROBE_DEFAULT); } static int cesa_attach(device_t dev) { static int engine_idx = 0; struct simplebus_devinfo *ndi; struct resource_list *rl; struct cesa_softc *sc; if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto")) return (cesa_attach_late(dev)); /* * Get simplebus_devinfo which contains * resource list filled with adresses and * interrupts read form FDT. * Let's correct it by splitting resources * for each engine. 
*/ if ((ndi = device_get_ivars(dev)) == NULL) return (ENXIO); rl = &ndi->rl; switch (engine_idx) { case 0: /* Update regs values */ resource_list_add(rl, SYS_RES_MEMORY, 0, CESA0_TDMA_ADDR, CESA0_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE); resource_list_add(rl, SYS_RES_MEMORY, 1, CESA0_CESA_ADDR, CESA0_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE); /* Remove unused interrupt */ resource_list_delete(rl, SYS_RES_IRQ, 1); break; case 1: /* Update regs values */ resource_list_add(rl, SYS_RES_MEMORY, 0, CESA1_TDMA_ADDR, CESA1_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE); resource_list_add(rl, SYS_RES_MEMORY, 1, CESA1_CESA_ADDR, CESA1_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE); /* Remove unused interrupt */ resource_list_delete(rl, SYS_RES_IRQ, 0); resource_list_find(rl, SYS_RES_IRQ, 1)->rid = 0; break; default: device_printf(dev, "Bad cesa engine_idx\n"); return (ENXIO); } sc = device_get_softc(dev); sc->sc_cesa_engine_id = engine_idx; /* * Call simplebus_add_device only once. * It will create second cesa driver instance * with the same FDT node as first instance. * When second driver reach this function, * it will be configured to use second cesa engine */ if (engine_idx == 0) simplebus_add_device(device_get_parent(dev), ofw_bus_get_node(dev), 0, "cesa", 1, NULL); engine_idx++; return (cesa_attach_late(dev)); } static int cesa_attach_late(device_t dev) { struct cesa_softc *sc; uint32_t d, r, val; int error; int i; sc = device_get_softc(dev); sc->sc_blocked = 0; sc->sc_error = 0; sc->sc_dev = dev; soc_id(&d, &r); switch (d) { case MV_DEV_88F6281: case MV_DEV_88F6282: /* Check if CESA peripheral device has power turned on */ if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) == CPU_PM_CTRL_CRYPTO) { device_printf(dev, "not powered on\n"); return (ENXIO); } sc->sc_tperr = 0; break; case MV_DEV_88F6828: case MV_DEV_88F6820: case MV_DEV_88F6810: sc->sc_tperr = 0; break; case MV_DEV_MV78100: case MV_DEV_MV78100_Z0: /* Check if CESA peripheral device has power turned on */ if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) != CPU_PM_CTRL_CRYPTO) { device_printf(dev, "not powered on\n"); return (ENXIO); } sc->sc_tperr = CESA_ICR_TPERR; break; default: return (ENXIO); } sc->sc_soc_id = d; /* Initialize mutexes */ mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev), "CESA Shared Data", MTX_DEF); mtx_init(&sc->sc_tdesc_lock, device_get_nameunit(dev), "CESA TDMA Descriptors Pool", MTX_DEF); mtx_init(&sc->sc_sdesc_lock, device_get_nameunit(dev), "CESA SA Descriptors Pool", MTX_DEF); mtx_init(&sc->sc_requests_lock, device_get_nameunit(dev), "CESA Requests Pool", MTX_DEF); mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev), "CESA Sessions Pool", MTX_DEF); /* Allocate I/O and IRQ resources */ error = bus_alloc_resources(dev, cesa_res_spec, sc->sc_res); if (error) { device_printf(dev, "could not allocate resources\n"); goto err0; } /* Acquire SRAM base address */ if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto")) error = cesa_setup_sram(sc); else error = cesa_setup_sram_armada(sc); if (error) { device_printf(dev, "could not setup SRAM\n"); goto err1; } /* Setup interrupt handler */ error = bus_setup_intr(dev, sc->sc_res[RES_CESA_IRQ], INTR_TYPE_NET | INTR_MPSAFE, NULL, cesa_intr, sc, &(sc->sc_icookie)); if (error) { device_printf(dev, "could not setup engine completion irq\n"); goto err2; } /* Create DMA tag for processed data */ error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ 
NULL, NULL, /* filtfunc, filtfuncarg */ CESA_MAX_REQUEST_SIZE, /* maxsize */ CESA_MAX_FRAGMENTS, /* nsegments */ CESA_MAX_REQUEST_SIZE, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->sc_data_dtag); /* dmat */ if (error) goto err3; /* Initialize data structures: TDMA Descriptors Pool */ error = cesa_alloc_dma_mem(sc, &sc->sc_tdesc_cdm, CESA_TDMA_DESCRIPTORS * sizeof(struct cesa_tdma_hdesc)); if (error) goto err4; STAILQ_INIT(&sc->sc_free_tdesc); for (i = 0; i < CESA_TDMA_DESCRIPTORS; i++) { sc->sc_tdesc[i].ctd_cthd = (struct cesa_tdma_hdesc *)(sc->sc_tdesc_cdm.cdm_vaddr) + i; sc->sc_tdesc[i].ctd_cthd_paddr = sc->sc_tdesc_cdm.cdm_paddr + (i * sizeof(struct cesa_tdma_hdesc)); STAILQ_INSERT_TAIL(&sc->sc_free_tdesc, &sc->sc_tdesc[i], ctd_stq); } /* Initialize data structures: SA Descriptors Pool */ error = cesa_alloc_dma_mem(sc, &sc->sc_sdesc_cdm, CESA_SA_DESCRIPTORS * sizeof(struct cesa_sa_hdesc)); if (error) goto err5; STAILQ_INIT(&sc->sc_free_sdesc); for (i = 0; i < CESA_SA_DESCRIPTORS; i++) { sc->sc_sdesc[i].csd_cshd = (struct cesa_sa_hdesc *)(sc->sc_sdesc_cdm.cdm_vaddr) + i; sc->sc_sdesc[i].csd_cshd_paddr = sc->sc_sdesc_cdm.cdm_paddr + (i * sizeof(struct cesa_sa_hdesc)); STAILQ_INSERT_TAIL(&sc->sc_free_sdesc, &sc->sc_sdesc[i], csd_stq); } /* Initialize data structures: Requests Pool */ error = cesa_alloc_dma_mem(sc, &sc->sc_requests_cdm, CESA_REQUESTS * sizeof(struct cesa_sa_data)); if (error) goto err6; STAILQ_INIT(&sc->sc_free_requests); STAILQ_INIT(&sc->sc_ready_requests); STAILQ_INIT(&sc->sc_queued_requests); for (i = 0; i < CESA_REQUESTS; i++) { sc->sc_requests[i].cr_csd = (struct cesa_sa_data *)(sc->sc_requests_cdm.cdm_vaddr) + i; sc->sc_requests[i].cr_csd_paddr = sc->sc_requests_cdm.cdm_paddr + (i * sizeof(struct cesa_sa_data)); /* Preallocate DMA maps */ error = bus_dmamap_create(sc->sc_data_dtag, 0, &sc->sc_requests[i].cr_dmap); if (error && i > 0) { i--; do { bus_dmamap_destroy(sc->sc_data_dtag, sc->sc_requests[i].cr_dmap); } while (i--); goto err7; } STAILQ_INSERT_TAIL(&sc->sc_free_requests, &sc->sc_requests[i], cr_stq); } - /* Initialize data structures: Sessions Pool */ - STAILQ_INIT(&sc->sc_free_sessions); - for (i = 0; i < CESA_SESSIONS; i++) { - sc->sc_sessions[i].cs_sid = i; - STAILQ_INSERT_TAIL(&sc->sc_free_sessions, &sc->sc_sessions[i], - cs_stq); - } - /* * Initialize TDMA: * - Burst limit: 128 bytes, * - Outstanding reads enabled, * - No byte-swap. */ val = CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 | CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE; if (sc->sc_soc_id == MV_DEV_88F6828 || sc->sc_soc_id == MV_DEV_88F6820 || sc->sc_soc_id == MV_DEV_88F6810) val |= CESA_TDMA_NUM_OUTSTAND; CESA_TDMA_WRITE(sc, CESA_TDMA_CR, val); /* * Initialize SA: * - SA descriptor is present at beginning of CESA SRAM, * - Multi-packet chain mode, * - Cooperation with TDMA enabled. 
*/ CESA_REG_WRITE(sc, CESA_SA_DPR, 0); CESA_REG_WRITE(sc, CESA_SA_CR, CESA_SA_CR_ACTIVATE_TDMA | CESA_SA_CR_WAIT_FOR_TDMA | CESA_SA_CR_MULTI_MODE); /* Unmask interrupts */ CESA_REG_WRITE(sc, CESA_ICR, 0); CESA_REG_WRITE(sc, CESA_ICM, CESA_ICM_ACCTDMA | sc->sc_tperr); CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0); CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, CESA_TDMA_EMR_MISS | CESA_TDMA_EMR_DOUBLE_HIT | CESA_TDMA_EMR_BOTH_HIT | CESA_TDMA_EMR_DATA_ERROR); /* Register in OCF */ - sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); + sc->sc_cid = crypto_get_driverid(dev, sizeof(struct cesa_session), + CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto err8; } crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0); crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); if (sc->sc_soc_id == MV_DEV_88F6828 || sc->sc_soc_id == MV_DEV_88F6820 || sc->sc_soc_id == MV_DEV_88F6810) crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0); return (0); err8: for (i = 0; i < CESA_REQUESTS; i++) bus_dmamap_destroy(sc->sc_data_dtag, sc->sc_requests[i].cr_dmap); err7: cesa_free_dma_mem(&sc->sc_requests_cdm); err6: cesa_free_dma_mem(&sc->sc_sdesc_cdm); err5: cesa_free_dma_mem(&sc->sc_tdesc_cdm); err4: bus_dma_tag_destroy(sc->sc_data_dtag); err3: bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie); err2: if (sc->sc_soc_id == MV_DEV_88F6828 || sc->sc_soc_id == MV_DEV_88F6820 || sc->sc_soc_id == MV_DEV_88F6810) pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size); err1: bus_release_resources(dev, cesa_res_spec, sc->sc_res); err0: mtx_destroy(&sc->sc_sessions_lock); mtx_destroy(&sc->sc_requests_lock); mtx_destroy(&sc->sc_sdesc_lock); mtx_destroy(&sc->sc_tdesc_lock); mtx_destroy(&sc->sc_sc_lock); return (ENXIO); } static int cesa_detach(device_t dev) { struct cesa_softc *sc; int i; sc = device_get_softc(dev); /* TODO: Wait for queued requests completion before shutdown. 
*/ /* Mask interrupts */ CESA_REG_WRITE(sc, CESA_ICM, 0); CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, 0); /* Unregister from OCF */ crypto_unregister_all(sc->sc_cid); /* Free DMA Maps */ for (i = 0; i < CESA_REQUESTS; i++) bus_dmamap_destroy(sc->sc_data_dtag, sc->sc_requests[i].cr_dmap); /* Free DMA Memory */ cesa_free_dma_mem(&sc->sc_requests_cdm); cesa_free_dma_mem(&sc->sc_sdesc_cdm); cesa_free_dma_mem(&sc->sc_tdesc_cdm); /* Free DMA Tag */ bus_dma_tag_destroy(sc->sc_data_dtag); /* Stop interrupt */ bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie); /* Relase I/O and IRQ resources */ bus_release_resources(dev, cesa_res_spec, sc->sc_res); /* Unmap SRAM memory */ if (sc->sc_soc_id == MV_DEV_88F6828 || sc->sc_soc_id == MV_DEV_88F6820 || sc->sc_soc_id == MV_DEV_88F6810) pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size); /* Destroy mutexes */ mtx_destroy(&sc->sc_sessions_lock); mtx_destroy(&sc->sc_requests_lock); mtx_destroy(&sc->sc_sdesc_lock); mtx_destroy(&sc->sc_tdesc_lock); mtx_destroy(&sc->sc_sc_lock); return (0); } static void cesa_intr(void *arg) { STAILQ_HEAD(, cesa_request) requests; struct cesa_request *cr, *tmp; struct cesa_softc *sc; uint32_t ecr, icr; int blocked; sc = arg; /* Ack interrupt */ ecr = CESA_TDMA_READ(sc, CESA_TDMA_ECR); CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0); icr = CESA_REG_READ(sc, CESA_ICR); CESA_REG_WRITE(sc, CESA_ICR, 0); /* Check for TDMA errors */ if (ecr & CESA_TDMA_ECR_MISS) { device_printf(sc->sc_dev, "TDMA Miss error detected!\n"); sc->sc_error = EIO; } if (ecr & CESA_TDMA_ECR_DOUBLE_HIT) { device_printf(sc->sc_dev, "TDMA Double Hit error detected!\n"); sc->sc_error = EIO; } if (ecr & CESA_TDMA_ECR_BOTH_HIT) { device_printf(sc->sc_dev, "TDMA Both Hit error detected!\n"); sc->sc_error = EIO; } if (ecr & CESA_TDMA_ECR_DATA_ERROR) { device_printf(sc->sc_dev, "TDMA Data error detected!\n"); sc->sc_error = EIO; } /* Check for CESA errors */ if (icr & sc->sc_tperr) { device_printf(sc->sc_dev, "CESA SRAM Parity error detected!\n"); sc->sc_error = EIO; } /* If there is nothing more to do, return */ if ((icr & CESA_ICR_ACCTDMA) == 0) return; /* Get all finished requests */ CESA_LOCK(sc, requests); STAILQ_INIT(&requests); STAILQ_CONCAT(&requests, &sc->sc_queued_requests); STAILQ_INIT(&sc->sc_queued_requests); CESA_UNLOCK(sc, requests); /* Execute all ready requests */ cesa_execute(sc); /* Process completed requests */ cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); STAILQ_FOREACH_SAFE(cr, &requests, cr_stq, tmp) { bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); cr->cr_crp->crp_etype = sc->sc_error; if (cr->cr_mac) crypto_copyback(cr->cr_crp->crp_flags, cr->cr_crp->crp_buf, cr->cr_mac->crd_inject, cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash); crypto_done(cr->cr_crp); cesa_free_request(sc, cr); } cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->sc_error = 0; /* Unblock driver if it ran out of resources */ CESA_LOCK(sc, sc); blocked = sc->sc_blocked; sc->sc_blocked = 0; CESA_UNLOCK(sc, sc); if (blocked) crypto_unblock(sc->sc_cid, blocked); } static int -cesa_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri) +cesa_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri) { struct cesa_session *cs; struct cesa_softc *sc; struct cryptoini *enc; struct cryptoini *mac; int error; sc = device_get_softc(dev); enc = NULL; mac = NULL; error = 0; /* Check and parse input */ if (cesa_is_hash(cri->cri_alg)) mac = cri; else enc 
= cri; cri = cri->cri_next; if (cri) { if (!enc && !cesa_is_hash(cri->cri_alg)) enc = cri; if (!mac && cesa_is_hash(cri->cri_alg)) mac = cri; if (cri->cri_next || !(enc && mac)) return (EINVAL); } if ((enc && (enc->cri_klen / 8) > CESA_MAX_KEY_LEN) || (mac && (mac->cri_klen / 8) > CESA_MAX_MKEY_LEN)) return (E2BIG); /* Allocate session */ - cs = cesa_alloc_session(sc); - if (!cs) - return (ENOMEM); + cs = crypto_get_driver_session(cses); /* Prepare CESA configuration */ cs->cs_config = 0; cs->cs_ivlen = 1; cs->cs_mblen = 1; if (enc) { switch (enc->cri_alg) { case CRYPTO_AES_CBC: cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC; cs->cs_ivlen = AES_BLOCK_LEN; break; case CRYPTO_DES_CBC: cs->cs_config |= CESA_CSHD_DES | CESA_CSHD_CBC; cs->cs_ivlen = DES_BLOCK_LEN; break; case CRYPTO_3DES_CBC: cs->cs_config |= CESA_CSHD_3DES | CESA_CSHD_3DES_EDE | CESA_CSHD_CBC; cs->cs_ivlen = DES3_BLOCK_LEN; break; default: error = EINVAL; break; } } if (!error && mac) { switch (mac->cri_alg) { case CRYPTO_MD5: cs->cs_mblen = 1; cs->cs_hlen = (mac->cri_mlen == 0) ? MD5_HASH_LEN : mac->cri_mlen; cs->cs_config |= CESA_CSHD_MD5; break; case CRYPTO_MD5_HMAC: cs->cs_mblen = MD5_BLOCK_LEN; cs->cs_hlen = (mac->cri_mlen == 0) ? MD5_HASH_LEN : mac->cri_mlen; cs->cs_config |= CESA_CSHD_MD5_HMAC; if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN) cs->cs_config |= CESA_CSHD_96_BIT_HMAC; break; case CRYPTO_SHA1: cs->cs_mblen = 1; cs->cs_hlen = (mac->cri_mlen == 0) ? SHA1_HASH_LEN : mac->cri_mlen; cs->cs_config |= CESA_CSHD_SHA1; break; case CRYPTO_SHA1_HMAC: cs->cs_mblen = SHA1_BLOCK_LEN; cs->cs_hlen = (mac->cri_mlen == 0) ? SHA1_HASH_LEN : mac->cri_mlen; cs->cs_config |= CESA_CSHD_SHA1_HMAC; if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN) cs->cs_config |= CESA_CSHD_96_BIT_HMAC; break; case CRYPTO_SHA2_256_HMAC: cs->cs_mblen = SHA2_256_BLOCK_LEN; cs->cs_hlen = (mac->cri_mlen == 0) ? 
SHA2_256_HASH_LEN : mac->cri_mlen; cs->cs_config |= CESA_CSHD_SHA2_256_HMAC; break; default: error = EINVAL; break; } } /* Save cipher key */ if (!error && enc && enc->cri_key) { cs->cs_klen = enc->cri_klen / 8; memcpy(cs->cs_key, enc->cri_key, cs->cs_klen); if (enc->cri_alg == CRYPTO_AES_CBC) error = cesa_prep_aes_key(cs); } /* Save digest key */ if (!error && mac && mac->cri_key) error = cesa_set_mkey(cs, mac->cri_alg, mac->cri_key, mac->cri_klen / 8); - if (error) { - cesa_free_session(sc, cs); - return (EINVAL); - } + if (error) + return (error); - *sidp = cs->cs_sid; - return (0); } static int -cesa_freesession(device_t dev, uint64_t tid) -{ - struct cesa_session *cs; - struct cesa_softc *sc; - - sc = device_get_softc(dev); - cs = cesa_get_session(sc, CRYPTO_SESID2LID(tid)); - if (!cs) - return (EINVAL); - - /* Free session */ - cesa_free_session(sc, cs); - - return (0); -} - -static int cesa_process(device_t dev, struct cryptop *crp, int hint) { struct cesa_request *cr; struct cesa_session *cs; struct cryptodesc *crd; struct cryptodesc *enc; struct cryptodesc *mac; struct cesa_softc *sc; int error; sc = device_get_softc(dev); crd = crp->crp_desc; enc = NULL; mac = NULL; error = 0; - /* Check session ID */ - cs = cesa_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid)); - if (!cs) { - crp->crp_etype = EINVAL; - crypto_done(crp); - return (0); - } + cs = crypto_get_driver_session(crp->crp_session); /* Check and parse input */ if (crp->crp_ilen > CESA_MAX_REQUEST_SIZE) { crp->crp_etype = E2BIG; crypto_done(crp); return (0); } if (cesa_is_hash(crd->crd_alg)) mac = crd; else enc = crd; crd = crd->crd_next; if (crd) { if (!enc && !cesa_is_hash(crd->crd_alg)) enc = crd; if (!mac && cesa_is_hash(crd->crd_alg)) mac = crd; if (crd->crd_next || !(enc && mac)) { crp->crp_etype = EINVAL; crypto_done(crp); return (0); } } /* * Get request descriptor. Block driver if there is no free * descriptors in pool. 
*/ cr = cesa_alloc_request(sc); if (!cr) { CESA_LOCK(sc, sc); sc->sc_blocked = CRYPTO_SYMQ; CESA_UNLOCK(sc, sc); return (ERESTART); } /* Prepare request */ cr->cr_crp = crp; cr->cr_enc = enc; cr->cr_mac = mac; cr->cr_cs = cs; CESA_LOCK(sc, sessions); cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (enc && enc->crd_flags & CRD_F_ENCRYPT) { if (enc->crd_flags & CRD_F_IV_EXPLICIT) memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen); else arc4rand(cr->cr_csd->csd_iv, cs->cs_ivlen, 0); if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0) crypto_copyback(crp->crp_flags, crp->crp_buf, enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv); } else if (enc) { if (enc->crd_flags & CRD_F_IV_EXPLICIT) memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen); else crypto_copydata(crp->crp_flags, crp->crp_buf, enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv); } if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) { if ((enc->crd_klen / 8) <= CESA_MAX_KEY_LEN) { cs->cs_klen = enc->crd_klen / 8; memcpy(cs->cs_key, enc->crd_key, cs->cs_klen); if (enc->crd_alg == CRYPTO_AES_CBC) error = cesa_prep_aes_key(cs); } else error = E2BIG; } if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) { if ((mac->crd_klen / 8) <= CESA_MAX_MKEY_LEN) error = cesa_set_mkey(cs, mac->crd_alg, mac->crd_key, mac->crd_klen / 8); else error = E2BIG; } /* Convert request to chain of TDMA and SA descriptors */ if (!error) error = cesa_create_chain(sc, cr); cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CESA_UNLOCK(sc, sessions); if (error) { cesa_free_request(sc, cr); crp->crp_etype = error; crypto_done(crp); return (0); } bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Enqueue request to execution */ cesa_enqueue_request(sc, cr); /* Start execution, if we have no more requests in queue */ if ((hint & CRYPTO_HINT_MORE) == 0) cesa_execute(sc); return (0); } Index: head/sys/dev/cesa/cesa.h =================================================================== --- head/sys/dev/cesa/cesa.h (revision 336438) +++ head/sys/dev/cesa/cesa.h (revision 336439) @@ -1,377 +1,371 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2009-2011 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
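The request path changes the same way in both drivers above: struct cryptop now carries the opaque handle in crp_session, so a process() method obtains its session state with a single call rather than searching a locked list by crp_sid. Continuing the hypothetical "foo" sketch from earlier (fields illustrative):

static int
foo_process(device_t dev, struct cryptop *crp, int hint __unused)
{
	struct foo_session *ses;

	if (crp == NULL || crp->crp_callback == NULL || crp->crp_desc == NULL)
		return (EINVAL);
	/* Direct lookup; no session table, no lock. */
	ses = crypto_get_driver_session(crp->crp_session);
	/* ... drive the hardware using ses->fs_key / ses->fs_klen ... */
	crp->crp_etype = 0;
	crypto_done(crp);
	return (0);
}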
* * $FreeBSD$ */ #ifndef _DEV_CESA_H_ #define _DEV_CESA_H_ /* Maximum number of allocated sessions */ #define CESA_SESSIONS 64 /* Maximum number of queued requests */ #define CESA_REQUESTS 256 /* * CESA is able to process data only in CESA SRAM, which is quite small (2 kB). * We have to fit a packet there, which contains SA descriptor, keys, IV * and data to be processed. Every request must be converted into chain of * packets and each packet can hold about 1.75 kB of data. * * To process each packet we need at least 1 SA descriptor and at least 4 TDMA * descriptors. However there are cases when we use 2 SA and 8 TDMA descriptors * per packet. Number of used TDMA descriptors can increase beyond given values * if data in the request is fragmented in physical memory. * * The driver uses preallocated SA and TDMA descriptors pools to get best * performace. Size of these pools should match expected request size. Example: * * Expected average request size: 1.5 kB (Ethernet MTU) * Packets per average request: (1.5 kB / 1.75 kB) = 1 * SA decriptors per average request (worst case): 1 * 2 = 2 * TDMA desctiptors per average request (worst case): 1 * 8 = 8 * * More TDMA descriptors should be allocated, if data fragmentation is expected * (for example while processing mbufs larger than MCLBYTES). The driver may use * 2 additional TDMA descriptors per each discontinuity in the physical data * layout. */ /* Values below are optimized for requests containing about 1.5 kB of data */ #define CESA_SA_DESC_PER_REQ 2 #define CESA_TDMA_DESC_PER_REQ 8 #define CESA_SA_DESCRIPTORS (CESA_SA_DESC_PER_REQ * CESA_REQUESTS) #define CESA_TDMA_DESCRIPTORS (CESA_TDMA_DESC_PER_REQ * CESA_REQUESTS) /* Useful constants */ #define CESA_HMAC_TRUNC_LEN 12 #define CESA_MAX_FRAGMENTS 64 #define CESA_SRAM_SIZE 2048 /* * CESA_MAX_HASH_LEN is maximum length of hash generated by CESA. * As CESA supports MD5, SHA1 and SHA-256 this equals to 32 bytes. 
*/ #define CESA_MAX_HASH_LEN 32 #define CESA_MAX_KEY_LEN 32 #define CESA_MAX_IV_LEN 16 #define CESA_MAX_HMAC_BLOCK_LEN 64 #define CESA_MAX_MKEY_LEN CESA_MAX_HMAC_BLOCK_LEN #define CESA_MAX_PACKET_SIZE (CESA_SRAM_SIZE - CESA_DATA(0)) #define CESA_MAX_REQUEST_SIZE 65535 /* Locking macros */ #define CESA_LOCK(sc, what) mtx_lock(&(sc)->sc_ ## what ## _lock) #define CESA_UNLOCK(sc, what) mtx_unlock(&(sc)->sc_ ## what ## _lock) #define CESA_LOCK_ASSERT(sc, what) \ mtx_assert(&(sc)->sc_ ## what ## _lock, MA_OWNED) /* Registers read/write macros */ #define CESA_REG_READ(sc, reg) \ bus_read_4((sc)->sc_res[RES_CESA_REGS], (reg)) #define CESA_REG_WRITE(sc, reg, val) \ bus_write_4((sc)->sc_res[RES_CESA_REGS], (reg), (val)) #define CESA_TDMA_READ(sc, reg) \ bus_read_4((sc)->sc_res[RES_TDMA_REGS], (reg)) #define CESA_TDMA_WRITE(sc, reg, val) \ bus_write_4((sc)->sc_res[RES_TDMA_REGS], (reg), (val)) /* Generic allocator for objects */ #define CESA_GENERIC_ALLOC_LOCKED(sc, obj, pool) do { \ CESA_LOCK(sc, pool); \ \ if (STAILQ_EMPTY(&(sc)->sc_free_ ## pool)) \ obj = NULL; \ else { \ obj = STAILQ_FIRST(&(sc)->sc_free_ ## pool); \ STAILQ_REMOVE_HEAD(&(sc)->sc_free_ ## pool, \ obj ## _stq); \ } \ \ CESA_UNLOCK(sc, pool); \ } while (0) #define CESA_GENERIC_FREE_LOCKED(sc, obj, pool) do { \ CESA_LOCK(sc, pool); \ STAILQ_INSERT_TAIL(&(sc)->sc_free_ ## pool, obj, \ obj ## _stq); \ CESA_UNLOCK(sc, pool); \ } while (0) /* CESA SRAM offset calculation macros */ #define CESA_SA_DATA(member) \ (sizeof(struct cesa_sa_hdesc) + offsetof(struct cesa_sa_data, member)) #define CESA_DATA(offset) \ (sizeof(struct cesa_sa_hdesc) + sizeof(struct cesa_sa_data) + offset) /* CESA memory and IRQ resources */ enum cesa_res_type { RES_TDMA_REGS, RES_CESA_REGS, RES_CESA_IRQ, RES_CESA_NUM }; struct cesa_tdma_hdesc { uint16_t cthd_byte_count; uint16_t cthd_flags; uint32_t cthd_src; uint32_t cthd_dst; uint32_t cthd_next; }; struct cesa_sa_hdesc { uint32_t cshd_config; uint16_t cshd_enc_src; uint16_t cshd_enc_dst; uint32_t cshd_enc_dlen; uint32_t cshd_enc_key; uint16_t cshd_enc_iv; uint16_t cshd_enc_iv_buf; uint16_t cshd_mac_src; uint16_t cshd_mac_total_dlen; uint16_t cshd_mac_dst; uint16_t cshd_mac_dlen; uint16_t cshd_mac_iv_in; uint16_t cshd_mac_iv_out; }; struct cesa_sa_data { uint8_t csd_key[CESA_MAX_KEY_LEN]; uint8_t csd_iv[CESA_MAX_IV_LEN]; uint8_t csd_hiv_in[CESA_MAX_HASH_LEN]; uint8_t csd_hiv_out[CESA_MAX_HASH_LEN]; uint8_t csd_hash[CESA_MAX_HASH_LEN]; }; struct cesa_dma_mem { void *cdm_vaddr; bus_addr_t cdm_paddr; bus_dma_tag_t cdm_tag; bus_dmamap_t cdm_map; }; struct cesa_tdma_desc { struct cesa_tdma_hdesc *ctd_cthd; bus_addr_t ctd_cthd_paddr; STAILQ_ENTRY(cesa_tdma_desc) ctd_stq; }; struct cesa_sa_desc { struct cesa_sa_hdesc *csd_cshd; bus_addr_t csd_cshd_paddr; STAILQ_ENTRY(cesa_sa_desc) csd_stq; }; struct cesa_session { - uint32_t cs_sid; uint32_t cs_config; unsigned int cs_klen; unsigned int cs_ivlen; unsigned int cs_hlen; unsigned int cs_mblen; uint8_t cs_key[CESA_MAX_KEY_LEN]; uint8_t cs_aes_dkey[CESA_MAX_KEY_LEN]; uint8_t cs_hiv_in[CESA_MAX_HASH_LEN]; uint8_t cs_hiv_out[CESA_MAX_HASH_LEN]; - - STAILQ_ENTRY(cesa_session) cs_stq; }; struct cesa_request { struct cesa_sa_data *cr_csd; bus_addr_t cr_csd_paddr; struct cryptop *cr_crp; struct cryptodesc *cr_enc; struct cryptodesc *cr_mac; struct cesa_session *cr_cs; bus_dmamap_t cr_dmap; int cr_dmap_loaded; STAILQ_HEAD(, cesa_tdma_desc) cr_tdesc; STAILQ_HEAD(, cesa_sa_desc) cr_sdesc; STAILQ_ENTRY(cesa_request) cr_stq; }; struct cesa_packet { STAILQ_HEAD(, cesa_tdma_desc) 
cp_copyin; STAILQ_HEAD(, cesa_tdma_desc) cp_copyout; unsigned int cp_size; unsigned int cp_offset; }; struct cesa_softc { device_t sc_dev; int32_t sc_cid; uint32_t sc_soc_id; struct resource *sc_res[RES_CESA_NUM]; void *sc_icookie; bus_dma_tag_t sc_data_dtag; int sc_error; int sc_tperr; uint8_t sc_cesa_engine_id; struct mtx sc_sc_lock; int sc_blocked; /* TDMA descriptors pool */ struct mtx sc_tdesc_lock; struct cesa_tdma_desc sc_tdesc[CESA_TDMA_DESCRIPTORS]; struct cesa_dma_mem sc_tdesc_cdm; STAILQ_HEAD(, cesa_tdma_desc) sc_free_tdesc; /* SA descriptors pool */ struct mtx sc_sdesc_lock; struct cesa_sa_desc sc_sdesc[CESA_SA_DESCRIPTORS]; struct cesa_dma_mem sc_sdesc_cdm; STAILQ_HEAD(, cesa_sa_desc) sc_free_sdesc; /* Requests pool */ struct mtx sc_requests_lock; struct cesa_request sc_requests[CESA_REQUESTS]; struct cesa_dma_mem sc_requests_cdm; STAILQ_HEAD(, cesa_request) sc_free_requests; STAILQ_HEAD(, cesa_request) sc_ready_requests; STAILQ_HEAD(, cesa_request) sc_queued_requests; - /* Sessions pool */ struct mtx sc_sessions_lock; - struct cesa_session sc_sessions[CESA_SESSIONS]; - STAILQ_HEAD(, cesa_session) sc_free_sessions; /* CESA SRAM Address */ bus_addr_t sc_sram_base_pa; vm_offset_t sc_sram_base_va; bus_size_t sc_sram_size; }; struct cesa_chain_info { struct cesa_softc *cci_sc; struct cesa_request *cci_cr; struct cryptodesc *cci_enc; struct cryptodesc *cci_mac; uint32_t cci_config; int cci_error; }; /* CESA descriptors flags definitions */ #define CESA_CTHD_OWNED (1 << 15) #define CESA_CSHD_MAC (0 << 0) #define CESA_CSHD_ENC (1 << 0) #define CESA_CSHD_MAC_AND_ENC (2 << 0) #define CESA_CSHD_ENC_AND_MAC (3 << 0) #define CESA_CSHD_OP_MASK (3 << 0) #define CESA_CSHD_MD5 (4 << 4) #define CESA_CSHD_SHA1 (5 << 4) #define CESA_CSHD_SHA2_256 (1 << 4) #define CESA_CSHD_MD5_HMAC (6 << 4) #define CESA_CSHD_SHA1_HMAC (7 << 4) #define CESA_CSHD_SHA2_256_HMAC (3 << 4) #define CESA_CSHD_96_BIT_HMAC (1 << 7) #define CESA_CSHD_DES (1 << 8) #define CESA_CSHD_3DES (2 << 8) #define CESA_CSHD_AES (3 << 8) #define CESA_CSHD_DECRYPT (1 << 12) #define CESA_CSHD_CBC (1 << 16) #define CESA_CSHD_3DES_EDE (1 << 20) #define CESA_CSH_AES_KLEN_128 (0 << 24) #define CESA_CSH_AES_KLEN_192 (1 << 24) #define CESA_CSH_AES_KLEN_256 (2 << 24) #define CESA_CSH_AES_KLEN_MASK (3 << 24) #define CESA_CSHD_FRAG_FIRST (1 << 30) #define CESA_CSHD_FRAG_LAST (2U << 30) #define CESA_CSHD_FRAG_MIDDLE (3U << 30) /* CESA registers definitions */ #define CESA_ICR 0x0E20 #define CESA_ICR_ACCTDMA (1 << 7) #define CESA_ICR_TPERR (1 << 12) #define CESA_ICM 0x0E24 #define CESA_ICM_ACCTDMA CESA_ICR_ACCTDMA #define CESA_ICM_TPERR CESA_ICR_TPERR /* CESA TDMA registers definitions */ #define CESA_TDMA_ND 0x0830 #define CESA_TDMA_CR 0x0840 #define CESA_TDMA_CR_DBL128 (4 << 0) #define CESA_TDMA_CR_ORDEN (1 << 4) #define CESA_TDMA_CR_SBL128 (4 << 6) #define CESA_TDMA_CR_NBS (1 << 11) #define CESA_TDMA_CR_ENABLE (1 << 12) #define CESA_TDMA_CR_FETCHND (1 << 13) #define CESA_TDMA_CR_ACTIVE (1 << 14) #define CESA_TDMA_NUM_OUTSTAND (2 << 16) #define CESA_TDMA_ECR 0x08C8 #define CESA_TDMA_ECR_MISS (1 << 0) #define CESA_TDMA_ECR_DOUBLE_HIT (1 << 1) #define CESA_TDMA_ECR_BOTH_HIT (1 << 2) #define CESA_TDMA_ECR_DATA_ERROR (1 << 3) #define CESA_TDMA_EMR 0x08CC #define CESA_TDMA_EMR_MISS CESA_TDMA_ECR_MISS #define CESA_TDMA_EMR_DOUBLE_HIT CESA_TDMA_ECR_DOUBLE_HIT #define CESA_TDMA_EMR_BOTH_HIT CESA_TDMA_ECR_BOTH_HIT #define CESA_TDMA_EMR_DATA_ERROR CESA_TDMA_ECR_DATA_ERROR /* CESA SA registers definitions */ #define CESA_SA_CMD 0x0E00 #define 
CESA_SA_CMD_ACTVATE (1 << 0) #define CESA_SA_CMD_SHA2 (1 << 31) #define CESA_SA_DPR 0x0E04 #define CESA_SA_CR 0x0E08 #define CESA_SA_CR_WAIT_FOR_TDMA (1 << 7) #define CESA_SA_CR_ACTIVATE_TDMA (1 << 9) #define CESA_SA_CR_MULTI_MODE (1 << 11) #define CESA_SA_SR 0x0E0C #define CESA_SA_SR_ACTIVE (1 << 0) #define CESA_TDMA_SIZE 0x1000 #define CESA_CESA_SIZE 0x1000 #define CESA0_TDMA_ADDR 0x90000 #define CESA0_CESA_ADDR 0x9D000 #define CESA1_TDMA_ADDR 0x92000 #define CESA1_CESA_ADDR 0x9F000 #endif Index: head/sys/dev/cxgbe/crypto/t4_crypto.c =================================================================== --- head/sys/dev/cxgbe/crypto/t4_crypto.c (revision 336438) +++ head/sys/dev/cxgbe/crypto/t4_crypto.c (revision 336439) @@ -1,2347 +1,2298 @@ /*- * Copyright (c) 2017 Chelsio Communications, Inc. * All rights reserved. * Written by: John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include "common/common.h" #include "crypto/t4_crypto.h" /* * Requests consist of: * * +-------------------------------+ * | struct fw_crypto_lookaside_wr | * +-------------------------------+ * | struct ulp_txpkt | * +-------------------------------+ * | struct ulptx_idata | * +-------------------------------+ * | struct cpl_tx_sec_pdu | * +-------------------------------+ * | struct cpl_tls_tx_scmd_fmt | * +-------------------------------+ * | key context header | * +-------------------------------+ * | AES key | ----- For requests with AES * +-------------------------------+ - * | IPAD (16-byte aligned) | \ * +-------------------------------+ +---- For requests with HMAC * | OPAD (16-byte aligned) | / * +-------------------------------+ - * | GMAC H | ----- For AES-GCM * +-------------------------------+ - * | struct cpl_rx_phys_dsgl | \ * +-------------------------------+ +---- Destination buffer for * | PHYS_DSGL entries | / non-hash-only requests * +-------------------------------+ - * | 16 dummy bytes | ----- Only for hash-only requests * +-------------------------------+ * | IV | ----- If immediate IV * +-------------------------------+ * | Payload | ----- If immediate Payload * +-------------------------------+ - * | struct ulptx_sgl | \ * +-------------------------------+ +---- If payload via SGL * | SGL entries | / * +-------------------------------+ - * * Note that the key context must be padded to ensure 16-byte alignment. * For HMAC requests, the key consists of the partial hash of the IPAD * followed by the partial hash of the OPAD. * * Replies consist of: * * +-------------------------------+ * | struct cpl_fw6_pld | * +-------------------------------+ * | hash digest | ----- For HMAC request with * +-------------------------------+ 'hash_size' set in work request * * A 32-bit big-endian error status word is supplied in the last 4 * bytes of data[0] in the CPL_FW6_PLD message. bit 0 indicates a * "MAC" error and bit 1 indicates a "PAD" error. * * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message * in the request is returned in data[1] of the CPL_FW6_PLD message. * * For block cipher replies, the updated IV is supplied in data[2] and * data[3] of the CPL_FW6_PLD message. * * For hash replies where the work request set 'hash_size' to request * a copy of the hash in the reply, the hash digest is supplied * immediately following the CPL_FW6_PLD message. */ /* * The crypto engine supports a maximum AAD size of 511 bytes. */ #define MAX_AAD_LEN 511 /* * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG * entries. While the CPL includes a 16-bit length field, the T6 can * sometimes hang if an error occurs while processing a request with a * single DSGL entry larger than 2k. */ #define MAX_RX_PHYS_DSGL_SGE 32 #define DSGL_SGE_MAXLEN 2048 /* * The adapter only supports requests with a total input or output * length of 64k-1 or smaller. Longer requests either result in hung * requests or incorrect results. 
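 * (64k-1 is 65535 bytes, which is the MAX_REQUEST_SIZE limit defined below.)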
*/ #define MAX_REQUEST_SIZE 65535 static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto"); struct ccr_session_hmac { struct auth_hash *auth_hash; int hash_len; unsigned int partial_digest_len; unsigned int auth_mode; unsigned int mk_size; char ipad[CHCR_HASH_MAX_BLOCK_SIZE_128]; char opad[CHCR_HASH_MAX_BLOCK_SIZE_128]; }; struct ccr_session_gmac { int hash_len; char ghash_h[GMAC_BLOCK_LEN]; }; struct ccr_session_blkcipher { unsigned int cipher_mode; unsigned int key_len; unsigned int iv_len; __be32 key_ctx_hdr; char enckey[CHCR_AES_MAX_KEY_LEN]; char deckey[CHCR_AES_MAX_KEY_LEN]; }; struct ccr_session { bool active; int pending; enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode; union { struct ccr_session_hmac hmac; struct ccr_session_gmac gmac; }; struct ccr_session_blkcipher blkcipher; }; struct ccr_softc { struct adapter *adapter; device_t dev; uint32_t cid; int tx_channel_id; - struct ccr_session *sessions; - int nsessions; struct mtx lock; bool detaching; struct sge_wrq *txq; struct sge_rxq *rxq; /* * Pre-allocate S/G lists used when preparing a work request. * 'sg_crp' contains an sglist describing the entire buffer * for a 'struct cryptop'. 'sg_ulptx' is used to describe * the data the engine should DMA as input via ULPTX_SGL. * 'sg_dsgl' is used to describe the destination that cipher * text and a tag should be written to. */ struct sglist *sg_crp; struct sglist *sg_ulptx; struct sglist *sg_dsgl; /* * Pre-allocate a dummy output buffer for the IV and AAD for * AEAD requests. */ char *iv_aad_buf; struct sglist *sg_iv_aad; /* Statistics. */ uint64_t stats_blkcipher_encrypt; uint64_t stats_blkcipher_decrypt; uint64_t stats_hmac; uint64_t stats_authenc_encrypt; uint64_t stats_authenc_decrypt; uint64_t stats_gcm_encrypt; uint64_t stats_gcm_decrypt; uint64_t stats_wr_nomem; uint64_t stats_inflight; uint64_t stats_mac_error; uint64_t stats_pad_error; uint64_t stats_bad_session; uint64_t stats_sglist_error; uint64_t stats_process_error; uint64_t stats_sw_fallback; }; /* * Crypto requests involve two kind of scatter/gather lists. * * Non-hash-only requests require a PHYS_DSGL that describes the * location to store the results of the encryption or decryption * operation. This SGL uses a different format (PHYS_DSGL) and should * exclude the crd_skip bytes at the start of the data as well as * any AAD or IV. For authenticated encryption requests it should * cover include the destination of the hash or tag. * * The input payload may either be supplied inline as immediate data, * or via a standard ULP_TX SGL. This SGL should include AAD, * ciphertext, and the hash or tag for authenticated decryption * requests. * * These scatter/gather lists can describe different subsets of the * buffer described by the crypto operation. ccr_populate_sglist() * generates a scatter/gather list that covers the entire crypto * operation buffer that is then used to construct the other * scatter/gather lists. */ static int ccr_populate_sglist(struct sglist *sg, struct cryptop *crp) { int error; sglist_reset(sg); if (crp->crp_flags & CRYPTO_F_IMBUF) error = sglist_append_mbuf(sg, (struct mbuf *)crp->crp_buf); else if (crp->crp_flags & CRYPTO_F_IOV) error = sglist_append_uio(sg, (struct uio *)crp->crp_buf); else error = sglist_append(sg, crp->crp_buf, crp->crp_ilen); return (error); } /* * Segments in 'sg' larger than 'maxsegsize' are counted as multiple * segments. 
*/ static int ccr_count_sgl(struct sglist *sg, int maxsegsize) { int i, nsegs; nsegs = 0; for (i = 0; i < sg->sg_nseg; i++) nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize); return (nsegs); } /* These functions deal with PHYS_DSGL for the reply buffer. */ static inline int ccr_phys_dsgl_len(int nsegs) { int len; len = (nsegs / 8) * sizeof(struct phys_sge_pairs); if ((nsegs % 8) != 0) { len += sizeof(uint16_t) * 8; len += roundup2(nsegs % 8, 2) * sizeof(uint64_t); } return (len); } static void ccr_write_phys_dsgl(struct ccr_softc *sc, void *dst, int nsegs) { struct sglist *sg; struct cpl_rx_phys_dsgl *cpl; struct phys_sge_pairs *sgl; vm_paddr_t paddr; size_t seglen; u_int i, j; sg = sc->sg_dsgl; cpl = dst; cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) | V_CPL_RX_PHYS_DSGL_ISRDMA(0)); cpl->pcirlxorder_to_noofsgentr = htobe32( V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) | V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) | V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) | V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs)); cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; cpl->rss_hdr_int.qid = htobe16(sc->rxq->iq.abs_id); cpl->rss_hdr_int.hash_val = 0; sgl = (struct phys_sge_pairs *)(cpl + 1); j = 0; for (i = 0; i < sg->sg_nseg; i++) { seglen = sg->sg_segs[i].ss_len; paddr = sg->sg_segs[i].ss_paddr; do { sgl->addr[j] = htobe64(paddr); if (seglen > DSGL_SGE_MAXLEN) { sgl->len[j] = htobe16(DSGL_SGE_MAXLEN); paddr += DSGL_SGE_MAXLEN; seglen -= DSGL_SGE_MAXLEN; } else { sgl->len[j] = htobe16(seglen); seglen = 0; } j++; if (j == 8) { sgl++; j = 0; } } while (seglen != 0); } MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs); } /* These functions deal with the ULPTX_SGL for input payload. */ static inline int ccr_ulptx_sgl_len(int nsegs) { u_int n; nsegs--; /* first segment is part of ulptx_sgl */ n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); return (roundup2(n, 16)); } static void ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs) { struct ulptx_sgl *usgl; struct sglist *sg; struct sglist_seg *ss; int i; sg = sc->sg_ulptx; MPASS(nsegs == sg->sg_nseg); ss = &sg->sg_segs[0]; usgl = dst; usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | V_ULPTX_NSGE(nsegs)); usgl->len0 = htobe32(ss->ss_len); usgl->addr0 = htobe64(ss->ss_paddr); ss++; for (i = 0; i < sg->sg_nseg - 1; i++) { usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len); usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr); ss++; } } static bool ccr_use_imm_data(u_int transhdr_len, u_int input_len) { if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN) return (false); if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) > SGE_MAX_WR_LEN) return (false); return (true); } static void ccr_populate_wreq(struct ccr_softc *sc, struct chcr_wr *crwr, u_int kctx_len, - u_int wr_len, uint32_t sid, u_int imm_len, u_int sgl_len, u_int hash_size, + u_int wr_len, u_int imm_len, u_int sgl_len, u_int hash_size, struct cryptop *crp) { u_int cctx_size; cctx_size = sizeof(struct _key_ctx) + kctx_len; crwr->wreq.op_to_cctx_size = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) | V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) | V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) | V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) | V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4)); crwr->wreq.len16_pkd = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16)); - crwr->wreq.session_id = htobe32(sid); + crwr->wreq.session_id = 0; crwr->wreq.rx_chid_to_rx_q_id = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(sc->tx_channel_id) | 
V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) | V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) | V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) | V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) | V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) | V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(sc->rxq->iq.abs_id)); crwr->wreq.key_addr = 0; crwr->wreq.pld_size_hash_size = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) | V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size)); crwr->wreq.cookie = htobe64((uintptr_t)crp); crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DATAMODIFY(0) | V_ULP_TXPKT_CHANNELID(sc->tx_channel_id) | V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(0) | V_ULP_TXPKT_RO(1)); crwr->ulptx.len = htobe32( ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16)); crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | V_ULP_TX_SC_MORE(imm_len != 0 ? 0 : 1)); crwr->sc_imm.len = htobe32(wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len); } static int -ccr_hmac(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s, - struct cryptop *crp) +ccr_hmac(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) { struct chcr_wr *crwr; struct wrqe *wr; struct auth_hash *axf; struct cryptodesc *crd; char *dst; u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len; u_int imm_len, iopad_size; int error, sgl_nsegs, sgl_len; crd = crp->crp_desc; /* Reject requests with too large of an input buffer. */ if (crd->crd_len > MAX_REQUEST_SIZE) return (EFBIG); axf = s->hmac.auth_hash; /* PADs must be 128-bit aligned. */ iopad_size = roundup2(s->hmac.partial_digest_len, 16); /* * The 'key' part of the context includes the aligned IPAD and * OPAD. */ kctx_len = iopad_size * 2; hash_size_in_response = axf->hashsize; transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); if (crd->crd_len == 0) { imm_len = axf->blocksize; sgl_nsegs = 0; sgl_len = 0; } else if (ccr_use_imm_data(transhdr_len, crd->crd_len)) { imm_len = crd->crd_len; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(sc->sg_ulptx); error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crd->crd_skip, crd->crd_len); if (error) return (error); sgl_nsegs = sc->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len; if (wr_len > SGE_MAX_WR_LEN) return (EFBIG); wr = alloc_wrqe(wr_len, sc->txq); if (wr == NULL) { sc->stats_wr_nomem++; return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); - ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, + ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, hash_size_in_response, crp); /* XXX: Hardcodes SGE loopback channel of 0. */ crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(0)); crwr->sec_cpl.pldlen = htobe32(crd->crd_len == 0 ? axf->blocksize : crd->crd_len); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) | V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_NOP) | V_SCMD_AUTH_MODE(s->hmac.auth_mode) | V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NO_TRUNC)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_LAST_FRAG(0) | V_SCMD_MORE_FRAGS(crd->crd_len == 0 ? 
1 : 0) | V_SCMD_MAC_ONLY(1)); memcpy(crwr->key_ctx.key, s->hmac.ipad, s->hmac.partial_digest_len); memcpy(crwr->key_ctx.key + iopad_size, s->hmac.opad, s->hmac.partial_digest_len); /* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */ kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | V_KEY_CONTEXT_OPAD_PRESENT(1) | V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) | V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1)); dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES; if (crd->crd_len == 0) { dst[0] = 0x80; *(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) = htobe64(axf->blocksize << 3); } else if (imm_len != 0) crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip, crd->crd_len, dst); else ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); return (0); } static int ccr_hmac_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { struct cryptodesc *crd; crd = crp->crp_desc; if (error == 0) { crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, s->hmac.hash_len, (c_caddr_t)(cpl + 1)); } return (error); } static int -ccr_blkcipher(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s, - struct cryptop *crp) +ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) { char iv[CHCR_MAX_CRYPTO_IV_LEN]; struct chcr_wr *crwr; struct wrqe *wr; struct cryptodesc *crd; char *dst; u_int kctx_len, key_half, op_type, transhdr_len, wr_len; u_int imm_len; int dsgl_nsegs, dsgl_len; int sgl_nsegs, sgl_len; int error; crd = crp->crp_desc; if (s->blkcipher.key_len == 0 || crd->crd_len == 0) return (EINVAL); if (crd->crd_alg == CRYPTO_AES_CBC && (crd->crd_len % AES_BLOCK_LEN) != 0) return (EINVAL); /* Reject requests with too large of an input buffer. */ if (crd->crd_len > MAX_REQUEST_SIZE) return (EFBIG); if (crd->crd_flags & CRD_F_ENCRYPT) op_type = CHCR_ENCRYPT_OP; else op_type = CHCR_DECRYPT_OP; sglist_reset(sc->sg_dsgl); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crd->crd_skip, crd->crd_len); if (error) return (error); dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) return (EFBIG); dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); /* The 'key' must be 128-bit aligned. */ kctx_len = roundup2(s->blkcipher.key_len, 16); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); if (ccr_use_imm_data(transhdr_len, crd->crd_len + s->blkcipher.iv_len)) { imm_len = crd->crd_len; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(sc->sg_ulptx); error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crd->crd_skip, crd->crd_len); if (error) return (error); sgl_nsegs = sc->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len + roundup2(imm_len, 16) + sgl_len; if (wr_len > SGE_MAX_WR_LEN) return (EFBIG); wr = alloc_wrqe(wr_len, sc->txq); if (wr == NULL) { sc->stats_wr_nomem++; return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); /* * Read the existing IV from the request or generate a random * one if none is provided. Optionally copy the generated IV * into the output buffer if requested. 
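 *
 * In OCF terms: CRD_F_IV_EXPLICIT means the IV is supplied in crd_iv;
 * otherwise an encrypt request generates a random IV while a decrypt
 * request reads it from the data buffer at crd_inject.  A generated IV
 * is copied back into the buffer unless CRD_F_IV_PRESENT is set.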
*/ if (op_type == CHCR_ENCRYPT_OP) { if (crd->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crd->crd_iv, s->blkcipher.iv_len); else arc4rand(iv, s->blkcipher.iv_len, 0); if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, s->blkcipher.iv_len, iv); } else { if (crd->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crd->crd_iv, s->blkcipher.iv_len); else crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_inject, s->blkcipher.iv_len, iv); } - ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, 0, + ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, crp); /* XXX: Hardcodes SGE loopback channel of 0. */ crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + crd->crd_len); crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTART(s->blkcipher.iv_len + 1) | V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) | V_SCMD_ENC_DEC_CTRL(op_type) | V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_NOP) | V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NOP) | V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) | V_SCMD_NUM_IVS(0)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_IV_GEN_CTRL(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len)); crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; switch (crd->crd_alg) { case CRYPTO_AES_CBC: if (crd->crd_flags & CRD_F_ENCRYPT) memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); else memcpy(crwr->key_ctx.key, s->blkcipher.deckey, s->blkcipher.key_len); break; case CRYPTO_AES_ICM: memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); break; case CRYPTO_AES_XTS: key_half = s->blkcipher.key_len / 2; memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, key_half); if (crd->crd_flags & CRD_F_ENCRYPT) memcpy(crwr->key_ctx.key + key_half, s->blkcipher.enckey, key_half); else memcpy(crwr->key_ctx.key + key_half, s->blkcipher.deckey, key_half); break; } dst = (char *)(crwr + 1) + kctx_len; ccr_write_phys_dsgl(sc, dst, dsgl_nsegs); dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; memcpy(dst, iv, s->blkcipher.iv_len); dst += s->blkcipher.iv_len; if (imm_len != 0) crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip, crd->crd_len, dst); else ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); return (0); } static int ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { /* * The updated IV to permit chained requests is at * cpl->data[2], but OCF doesn't permit chained requests. */ return (error); } /* * 'hashsize' is the length of a full digest. 'authsize' is the * requested digest length for this operation which may be less * than 'hashsize'. 
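 *
 * For example, with SHA2-256 (hashsize of 32 bytes) an authsize of 12
 * selects CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT, an authsize of 16 selects
 * CHCR_SCMD_HMAC_CTRL_DIV2, and an authsize of 32 falls through to
 * CHCR_SCMD_HMAC_CTRL_NO_TRUNC.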
*/ static int ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize) { if (authsize == 10) return (CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366); if (authsize == 12) return (CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT); if (authsize == hashsize / 2) return (CHCR_SCMD_HMAC_CTRL_DIV2); return (CHCR_SCMD_HMAC_CTRL_NO_TRUNC); } static int -ccr_authenc(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s, - struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde) +ccr_authenc(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, + struct cryptodesc *crda, struct cryptodesc *crde) { char iv[CHCR_MAX_CRYPTO_IV_LEN]; struct chcr_wr *crwr; struct wrqe *wr; struct auth_hash *axf; char *dst; u_int kctx_len, key_half, op_type, transhdr_len, wr_len; u_int hash_size_in_response, imm_len, iopad_size; u_int aad_start, aad_len, aad_stop; u_int auth_start, auth_stop, auth_insert; u_int cipher_start, cipher_stop; u_int hmac_ctrl, input_len; int dsgl_nsegs, dsgl_len; int sgl_nsegs, sgl_len; int error; /* * If there is a need in the future, requests with an empty * payload could be supported as HMAC-only requests. */ if (s->blkcipher.key_len == 0 || crde->crd_len == 0) return (EINVAL); if (crde->crd_alg == CRYPTO_AES_CBC && (crde->crd_len % AES_BLOCK_LEN) != 0) return (EINVAL); /* * Compute the length of the AAD (data covered by the * authentication descriptor but not the encryption * descriptor). To simplify the logic, AAD is only permitted * before the cipher/plain text, not after. This is true of * all currently-generated requests. */ if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip) return (EINVAL); if (crda->crd_skip < crde->crd_skip) { if (crda->crd_skip + crda->crd_len > crde->crd_skip) aad_len = (crde->crd_skip - crda->crd_skip); else aad_len = crda->crd_len; } else aad_len = 0; if (aad_len + s->blkcipher.iv_len > MAX_AAD_LEN) return (EINVAL); axf = s->hmac.auth_hash; hash_size_in_response = s->hmac.hash_len; if (crde->crd_flags & CRD_F_ENCRYPT) op_type = CHCR_ENCRYPT_OP; else op_type = CHCR_DECRYPT_OP; /* * The output buffer consists of the cipher text followed by * the hash when encrypting. For decryption it only contains * the plain text. * * Due to a firmware bug, the output buffer must include a * dummy output buffer for the IV and AAD prior to the real * output buffer. */ if (op_type == CHCR_ENCRYPT_OP) { if (s->blkcipher.iv_len + aad_len + crde->crd_len + hash_size_in_response > MAX_REQUEST_SIZE) return (EFBIG); } else { if (s->blkcipher.iv_len + aad_len + crde->crd_len > MAX_REQUEST_SIZE) return (EFBIG); } sglist_reset(sc->sg_dsgl); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, s->blkcipher.iv_len + aad_len); if (error) return (error); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip, crde->crd_len); if (error) return (error); if (op_type == CHCR_ENCRYPT_OP) { error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crda->crd_inject, hash_size_in_response); if (error) return (error); } dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) return (EFBIG); dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); /* PADs must be 128-bit aligned. */ iopad_size = roundup2(s->hmac.partial_digest_len, 16); /* * The 'key' part of the key context consists of the key followed * by the IPAD and OPAD. */ kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); /* * The input buffer consists of the IV, any AAD, and then the * cipher/plain text. 
For decryption requests the hash is * appended after the cipher text. * * The IV is always stored at the start of the input buffer * even though it may be duplicated in the payload. The * crypto engine doesn't work properly if the IV offset points * inside of the AAD region, so a second copy is always * required. */ input_len = aad_len + crde->crd_len; /* * The firmware hangs if sent a request which is a * bit smaller than MAX_REQUEST_SIZE. In particular, the * firmware appears to require 512 - 16 bytes of spare room * along with the size of the hash even if the hash isn't * included in the input buffer. */ if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) > MAX_REQUEST_SIZE) return (EFBIG); if (op_type == CHCR_DECRYPT_OP) input_len += hash_size_in_response; if (ccr_use_imm_data(transhdr_len, s->blkcipher.iv_len + input_len)) { imm_len = input_len; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(sc->sg_ulptx); if (aad_len != 0) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crda->crd_skip, aad_len); if (error) return (error); } error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crde->crd_skip, crde->crd_len); if (error) return (error); if (op_type == CHCR_DECRYPT_OP) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crda->crd_inject, hash_size_in_response); if (error) return (error); } sgl_nsegs = sc->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } /* * Any auth-only data before the cipher region is marked as AAD. * Auth-data that overlaps with the cipher region is placed in * the auth section. */ if (aad_len != 0) { aad_start = s->blkcipher.iv_len + 1; aad_stop = aad_start + aad_len - 1; } else { aad_start = 0; aad_stop = 0; } cipher_start = s->blkcipher.iv_len + aad_len + 1; if (op_type == CHCR_DECRYPT_OP) cipher_stop = hash_size_in_response; else cipher_stop = 0; if (aad_len == crda->crd_len) { auth_start = 0; auth_stop = 0; } else { if (aad_len != 0) auth_start = cipher_start; else auth_start = s->blkcipher.iv_len + crda->crd_skip - crde->crd_skip + 1; auth_stop = (crde->crd_skip + crde->crd_len) - (crda->crd_skip + crda->crd_len) + cipher_stop; } if (op_type == CHCR_DECRYPT_OP) auth_insert = hash_size_in_response; else auth_insert = 0; wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len + roundup2(imm_len, 16) + sgl_len; if (wr_len > SGE_MAX_WR_LEN) return (EFBIG); wr = alloc_wrqe(wr_len, sc->txq); if (wr == NULL) { sc->stats_wr_nomem++; return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); /* * Read the existing IV from the request or generate a random * one if none is provided. Optionally copy the generated IV * into the output buffer if requested. */ if (op_type == CHCR_ENCRYPT_OP) { if (crde->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crde->crd_iv, s->blkcipher.iv_len); else arc4rand(iv, s->blkcipher.iv_len, 0); if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0) crypto_copyback(crp->crp_flags, crp->crp_buf, crde->crd_inject, s->blkcipher.iv_len, iv); } else { if (crde->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crde->crd_iv, s->blkcipher.iv_len); else crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_inject, s->blkcipher.iv_len, iv); } - ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, + ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp); /* XXX: Hardcodes SGE loopback channel of 0. 
*/ crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + input_len); crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( V_CPL_TX_SEC_PDU_AADSTART(aad_start) | V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4)); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) | V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) | V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) | V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response); crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) | V_SCMD_ENC_DEC_CTRL(op_type) | V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) | V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | V_SCMD_AUTH_MODE(s->hmac.auth_mode) | V_SCMD_HMAC_CTRL(hmac_ctrl) | V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) | V_SCMD_NUM_IVS(0)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_IV_GEN_CTRL(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; switch (crde->crd_alg) { case CRYPTO_AES_CBC: if (crde->crd_flags & CRD_F_ENCRYPT) memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); else memcpy(crwr->key_ctx.key, s->blkcipher.deckey, s->blkcipher.key_len); break; case CRYPTO_AES_ICM: memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); break; case CRYPTO_AES_XTS: key_half = s->blkcipher.key_len / 2; memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, key_half); if (crde->crd_flags & CRD_F_ENCRYPT) memcpy(crwr->key_ctx.key + key_half, s->blkcipher.enckey, key_half); else memcpy(crwr->key_ctx.key + key_half, s->blkcipher.deckey, key_half); break; } dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); memcpy(dst, s->hmac.ipad, s->hmac.partial_digest_len); memcpy(dst + iopad_size, s->hmac.opad, s->hmac.partial_digest_len); dst = (char *)(crwr + 1) + kctx_len; ccr_write_phys_dsgl(sc, dst, dsgl_nsegs); dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; memcpy(dst, iv, s->blkcipher.iv_len); dst += s->blkcipher.iv_len; if (imm_len != 0) { if (aad_len != 0) { crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip, aad_len, dst); dst += aad_len; } crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip, crde->crd_len, dst); dst += crde->crd_len; if (op_type == CHCR_DECRYPT_OP) crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject, hash_size_in_response, dst); } else ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); return (0); } static int ccr_authenc_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { struct cryptodesc *crd; /* * The updated IV to permit chained requests is at * cpl->data[2], but OCF doesn't permit chained requests. * * For a decryption request, the hardware may do a verification * of the HMAC which will fail if the existing HMAC isn't in the * buffer. If that happens, clear the error and copy the HMAC * from the CPL reply into the buffer. 
* * For encryption requests, crd should be the cipher request * which will have CRD_F_ENCRYPT set. For decryption * requests, crp_desc will be the HMAC request which should * not have this flag set. */ crd = crp->crp_desc; if (error == EBADMSG && !CHK_PAD_ERR_BIT(be64toh(cpl->data[0])) && !(crd->crd_flags & CRD_F_ENCRYPT)) { crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, s->hmac.hash_len, (c_caddr_t)(cpl + 1)); error = 0; } return (error); } static int -ccr_gcm(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s, - struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde) +ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, + struct cryptodesc *crda, struct cryptodesc *crde) { char iv[CHCR_MAX_CRYPTO_IV_LEN]; struct chcr_wr *crwr; struct wrqe *wr; char *dst; u_int iv_len, kctx_len, op_type, transhdr_len, wr_len; u_int hash_size_in_response, imm_len; u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert; u_int hmac_ctrl, input_len; int dsgl_nsegs, dsgl_len; int sgl_nsegs, sgl_len; int error; if (s->blkcipher.key_len == 0) return (EINVAL); /* * The crypto engine doesn't handle GCM requests with an empty * payload, so handle those in software instead. */ if (crde->crd_len == 0) return (EMSGSIZE); /* * AAD is only permitted before the cipher/plain text, not * after. */ if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip) return (EMSGSIZE); if (crda->crd_len + AES_BLOCK_LEN > MAX_AAD_LEN) return (EMSGSIZE); hash_size_in_response = s->gmac.hash_len; if (crde->crd_flags & CRD_F_ENCRYPT) op_type = CHCR_ENCRYPT_OP; else op_type = CHCR_DECRYPT_OP; /* * The IV handling for GCM in OCF is a bit more complicated in * that IPSec provides a full 16-byte IV (including the * counter), whereas the /dev/crypto interface sometimes * provides a full 16-byte IV (if no IV is provided in the * ioctl) and sometimes a 12-byte IV (if the IV was explicit). * * When provided a 12-byte IV, assume the IV is really 16 bytes * with a counter in the last 4 bytes initialized to 1. * * While iv_len is checked below, the value is currently * always set to 12 when creating a GCM session in this driver * due to limitations in OCF (there is no way to know what the * IV length of a given request will be). This means that the * driver always assumes as 12-byte IV for now. */ if (s->blkcipher.iv_len == 12) iv_len = AES_BLOCK_LEN; else iv_len = s->blkcipher.iv_len; /* * The output buffer consists of the cipher text followed by * the tag when encrypting. For decryption it only contains * the plain text. * * Due to a firmware bug, the output buffer must include a * dummy output buffer for the IV and AAD prior to the real * output buffer. 
*/ if (op_type == CHCR_ENCRYPT_OP) { if (iv_len + crda->crd_len + crde->crd_len + hash_size_in_response > MAX_REQUEST_SIZE) return (EFBIG); } else { if (iv_len + crda->crd_len + crde->crd_len > MAX_REQUEST_SIZE) return (EFBIG); } sglist_reset(sc->sg_dsgl); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len + crda->crd_len); if (error) return (error); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip, crde->crd_len); if (error) return (error); if (op_type == CHCR_ENCRYPT_OP) { error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crda->crd_inject, hash_size_in_response); if (error) return (error); } dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) return (EFBIG); dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); /* * The 'key' part of the key context consists of the key followed * by the Galois hash key. */ kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); /* * The input buffer consists of the IV, any AAD, and then the * cipher/plain text. For decryption requests the hash is * appended after the cipher text. * * The IV is always stored at the start of the input buffer * even though it may be duplicated in the payload. The * crypto engine doesn't work properly if the IV offset points * inside of the AAD region, so a second copy is always * required. */ input_len = crda->crd_len + crde->crd_len; if (op_type == CHCR_DECRYPT_OP) input_len += hash_size_in_response; if (input_len > MAX_REQUEST_SIZE) return (EFBIG); if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { imm_len = input_len; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(sc->sg_ulptx); if (crda->crd_len != 0) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crda->crd_skip, crda->crd_len); if (error) return (error); } error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crde->crd_skip, crde->crd_len); if (error) return (error); if (op_type == CHCR_DECRYPT_OP) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crda->crd_inject, hash_size_in_response); if (error) return (error); } sgl_nsegs = sc->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } if (crda->crd_len != 0) { aad_start = iv_len + 1; aad_stop = aad_start + crda->crd_len - 1; } else { aad_start = 0; aad_stop = 0; } cipher_start = iv_len + crda->crd_len + 1; if (op_type == CHCR_DECRYPT_OP) cipher_stop = hash_size_in_response; else cipher_stop = 0; if (op_type == CHCR_DECRYPT_OP) auth_insert = hash_size_in_response; else auth_insert = 0; wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + sgl_len; if (wr_len > SGE_MAX_WR_LEN) return (EFBIG); wr = alloc_wrqe(wr_len, sc->txq); if (wr == NULL) { sc->stats_wr_nomem++; return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); /* * Read the existing IV from the request or generate a random * one if none is provided. Optionally copy the generated IV * into the output buffer if requested. * * If the input IV is 12 bytes, append an explicit 4-byte * counter of 1. 
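 *
 * The resulting 16-byte IV handed to the engine is laid out as:
 *
 *   bytes  0-11: nonce taken from the request
 *   bytes 12-15: 32-bit big-endian block counter, initialized to 1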
*/ if (op_type == CHCR_ENCRYPT_OP) { if (crde->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crde->crd_iv, s->blkcipher.iv_len); else arc4rand(iv, s->blkcipher.iv_len, 0); if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0) crypto_copyback(crp->crp_flags, crp->crp_buf, crde->crd_inject, s->blkcipher.iv_len, iv); } else { if (crde->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crde->crd_iv, s->blkcipher.iv_len); else crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_inject, s->blkcipher.iv_len, iv); } if (s->blkcipher.iv_len == 12) *(uint32_t *)&iv[12] = htobe32(1); - ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, - 0, crp); + ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, + crp); /* XXX: Hardcodes SGE loopback channel of 0. */ crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); /* * NB: cipherstop is explicitly set to 0. On encrypt it * should normally be set to 0 anyway (as the encrypt crd ends * at the end of the input). However, for decrypt the cipher * ends before the tag in the AUTHENC case (and authstop is * set to stop before the tag), but for GCM the cipher still * runs to the end of the buffer. Not sure if this is * intentional or a firmware quirk, but it is required for * working tag validation with GCM decryption. */ crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( V_CPL_TX_SEC_PDU_AADSTART(aad_start) | V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) | V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response); crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) | V_SCMD_ENC_DEC_CTRL(op_type) | V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 
1 : 0) | V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_AES_GCM) | V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_GHASH) | V_SCMD_HMAC_CTRL(hmac_ctrl) | V_SCMD_IV_SIZE(iv_len / 2) | V_SCMD_NUM_IVS(0)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_IV_GEN_CTRL(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN); dst = (char *)(crwr + 1) + kctx_len; ccr_write_phys_dsgl(sc, dst, dsgl_nsegs); dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; memcpy(dst, iv, iv_len); dst += iv_len; if (imm_len != 0) { if (crda->crd_len != 0) { crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip, crda->crd_len, dst); dst += crda->crd_len; } crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip, crde->crd_len, dst); dst += crde->crd_len; if (op_type == CHCR_DECRYPT_OP) crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject, hash_size_in_response, dst); } else ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); return (0); } static int ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { /* * The updated IV to permit chained requests is at * cpl->data[2], but OCF doesn't permit chained requests. * * Note that the hardware should always verify the GMAC hash. */ return (error); } /* * Handle a GCM request that is not supported by the crypto engine by * performing the operation in software. Derived from swcr_authenc(). */ static void ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde) { struct auth_hash *axf; struct enc_xform *exf; void *auth_ctx; uint8_t *kschedule; char block[GMAC_BLOCK_LEN]; char digest[GMAC_DIGEST_LEN]; char iv[AES_BLOCK_LEN]; int error, i, len; auth_ctx = NULL; kschedule = NULL; /* Initialize the MAC. */ switch (s->blkcipher.key_len) { case 16: axf = &auth_hash_nist_gmac_aes_128; break; case 24: axf = &auth_hash_nist_gmac_aes_192; break; case 32: axf = &auth_hash_nist_gmac_aes_256; break; default: error = EINVAL; goto out; } auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT); if (auth_ctx == NULL) { error = ENOMEM; goto out; } axf->Init(auth_ctx); axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len); /* Initialize the cipher. */ exf = &enc_xform_aes_nist_gcm; error = exf->setkey(&kschedule, s->blkcipher.enckey, s->blkcipher.key_len); if (error) goto out; /* * This assumes a 12-byte IV from the crp. See longer comment * above in ccr_gcm() for more details. */ if (crde->crd_flags & CRD_F_ENCRYPT) { if (crde->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crde->crd_iv, 12); else arc4rand(iv, 12, 0); if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0) crypto_copyback(crp->crp_flags, crp->crp_buf, crde->crd_inject, 12, iv); } else { if (crde->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crde->crd_iv, 12); else crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_inject, 12, iv); } *(uint32_t *)&iv[12] = htobe32(1); axf->Reinit(auth_ctx, iv, sizeof(iv)); /* MAC the AAD. 
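 * The AAD is fed to the GHASH in GMAC_BLOCK_LEN (16-byte) chunks; a
 * short final chunk is zero-padded before being passed to Update().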
*/ for (i = 0; i < crda->crd_len; i += sizeof(block)) { len = imin(crda->crd_len - i, sizeof(block)); crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip + i, len, block); bzero(block + len, sizeof(block) - len); axf->Update(auth_ctx, block, sizeof(block)); } exf->reinit(kschedule, iv); /* Do encryption with MAC */ for (i = 0; i < crde->crd_len; i += sizeof(block)) { len = imin(crde->crd_len - i, sizeof(block)); crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip + i, len, block); bzero(block + len, sizeof(block) - len); if (crde->crd_flags & CRD_F_ENCRYPT) { exf->encrypt(kschedule, block); axf->Update(auth_ctx, block, len); crypto_copyback(crp->crp_flags, crp->crp_buf, crde->crd_skip + i, len, block); } else { axf->Update(auth_ctx, block, len); } } /* Length block. */ bzero(block, sizeof(block)); ((uint32_t *)block)[1] = htobe32(crda->crd_len * 8); ((uint32_t *)block)[3] = htobe32(crde->crd_len * 8); axf->Update(auth_ctx, block, sizeof(block)); /* Finalize MAC. */ axf->Final(digest, auth_ctx); /* Inject or validate tag. */ if (crde->crd_flags & CRD_F_ENCRYPT) { crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject, sizeof(digest), digest); error = 0; } else { char digest2[GMAC_DIGEST_LEN]; crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject, sizeof(digest2), digest2); if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) { error = 0; /* Tag matches, decrypt data. */ for (i = 0; i < crde->crd_len; i += sizeof(block)) { len = imin(crde->crd_len - i, sizeof(block)); crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip + i, len, block); bzero(block + len, sizeof(block) - len); exf->decrypt(kschedule, block); crypto_copyback(crp->crp_flags, crp->crp_buf, crde->crd_skip + i, len, block); } } else error = EBADMSG; } exf->zerokey(&kschedule); out: if (auth_ctx != NULL) { memset(auth_ctx, 0, axf->ctxsize); free(auth_ctx, M_CCR); } crp->crp_etype = error; crypto_done(crp); } static void ccr_identify(driver_t *driver, device_t parent) { struct adapter *sc; sc = device_get_softc(parent); if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE && device_find_child(parent, "ccr", -1) == NULL) device_add_child(parent, "ccr", -1); } static int ccr_probe(device_t dev) { device_set_desc(dev, "Chelsio Crypto Accelerator"); return (BUS_PROBE_DEFAULT); } static void ccr_sysctls(struct ccr_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *oid; struct sysctl_oid_list *children; ctx = device_get_sysctl_ctx(sc->dev); /* * dev.ccr.X. */ oid = device_get_sysctl_tree(sc->dev); children = SYSCTL_CHILDREN(oid); /* * dev.ccr.X.stats. 
*/ oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, NULL, "statistics"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD, &sc->stats_hmac, 0, "HMAC requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD, &sc->stats_blkcipher_encrypt, 0, "Cipher encryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD, &sc->stats_blkcipher_decrypt, 0, "Cipher decryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_encrypt", CTLFLAG_RD, &sc->stats_authenc_encrypt, 0, "Combined AES+HMAC encryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_decrypt", CTLFLAG_RD, &sc->stats_authenc_decrypt, 0, "Combined AES+HMAC decryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD, &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD, &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD, &sc->stats_wr_nomem, 0, "Work request memory allocation failures"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD, &sc->stats_inflight, 0, "Requests currently pending"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD, &sc->stats_mac_error, 0, "MAC errors"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD, &sc->stats_pad_error, 0, "Padding errors"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD, &sc->stats_bad_session, 0, "Requests with invalid session ID"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD, &sc->stats_sglist_error, 0, "Requests for which DMA mapping failed"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD, &sc->stats_process_error, 0, "Requests failed during queueing"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sw_fallback", CTLFLAG_RD, &sc->stats_sw_fallback, 0, "Requests processed by falling back to software"); } static int ccr_attach(device_t dev) { struct ccr_softc *sc; int32_t cid; /* * TODO: Crypto requests will panic if the parent device isn't * initialized so that the queues are up and running. Need to * figure out how to handle that correctly, maybe just reject * requests if the adapter isn't fully initialized? */ sc = device_get_softc(dev); sc->dev = dev; sc->adapter = device_get_softc(device_get_parent(dev)); sc->txq = &sc->adapter->sge.ctrlq[0]; sc->rxq = &sc->adapter->sge.rxq[0]; - cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); + cid = crypto_get_driverid(dev, sizeof(struct ccr_session), + CRYPTOCAP_F_HARDWARE); if (cid < 0) { device_printf(dev, "could not get crypto driver id\n"); return (ENXIO); } sc->cid = cid; sc->adapter->ccr_softc = sc; /* XXX: TODO? 
*/ sc->tx_channel_id = 0; mtx_init(&sc->lock, "ccr", NULL, MTX_DEF); sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK); sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK); sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK); sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK); sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK); ccr_sysctls(sc); crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0); crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0); crypto_register(cid, CRYPTO_SHA2_384_HMAC, 0, 0); crypto_register(cid, CRYPTO_SHA2_512_HMAC, 0, 0); crypto_register(cid, CRYPTO_AES_CBC, 0, 0); crypto_register(cid, CRYPTO_AES_ICM, 0, 0); crypto_register(cid, CRYPTO_AES_NIST_GCM_16, 0, 0); crypto_register(cid, CRYPTO_AES_128_NIST_GMAC, 0, 0); crypto_register(cid, CRYPTO_AES_192_NIST_GMAC, 0, 0); crypto_register(cid, CRYPTO_AES_256_NIST_GMAC, 0, 0); crypto_register(cid, CRYPTO_AES_XTS, 0, 0); return (0); } static int ccr_detach(device_t dev) { struct ccr_softc *sc; - int i; sc = device_get_softc(dev); mtx_lock(&sc->lock); - for (i = 0; i < sc->nsessions; i++) { - if (sc->sessions[i].active || sc->sessions[i].pending != 0) { - mtx_unlock(&sc->lock); - return (EBUSY); - } - } sc->detaching = true; mtx_unlock(&sc->lock); crypto_unregister_all(sc->cid); - free(sc->sessions, M_CCR); + mtx_destroy(&sc->lock); sglist_free(sc->sg_iv_aad); free(sc->iv_aad_buf, M_CCR); sglist_free(sc->sg_dsgl); sglist_free(sc->sg_ulptx); sglist_free(sc->sg_crp); sc->adapter->ccr_softc = NULL; return (0); } static void ccr_copy_partial_hash(void *dst, int cri_alg, union authctx *auth_ctx) { uint32_t *u32; uint64_t *u64; u_int i; u32 = (uint32_t *)dst; u64 = (uint64_t *)dst; switch (cri_alg) { case CRYPTO_SHA1_HMAC: for (i = 0; i < SHA1_HASH_LEN / 4; i++) u32[i] = htobe32(auth_ctx->sha1ctx.h.b32[i]); break; case CRYPTO_SHA2_256_HMAC: for (i = 0; i < SHA2_256_HASH_LEN / 4; i++) u32[i] = htobe32(auth_ctx->sha256ctx.state[i]); break; case CRYPTO_SHA2_384_HMAC: for (i = 0; i < SHA2_512_HASH_LEN / 8; i++) u64[i] = htobe64(auth_ctx->sha384ctx.state[i]); break; case CRYPTO_SHA2_512_HMAC: for (i = 0; i < SHA2_512_HASH_LEN / 8; i++) u64[i] = htobe64(auth_ctx->sha512ctx.state[i]); break; } } static void ccr_init_hmac_digest(struct ccr_session *s, int cri_alg, char *key, int klen) { union authctx auth_ctx; struct auth_hash *axf; u_int i; /* * If the key is larger than the block size, use the digest of * the key as the key instead. */ axf = s->hmac.auth_hash; klen /= 8; if (klen > axf->blocksize) { axf->Init(&auth_ctx); axf->Update(&auth_ctx, key, klen); axf->Final(s->hmac.ipad, &auth_ctx); klen = axf->hashsize; } else memcpy(s->hmac.ipad, key, klen); memset(s->hmac.ipad + klen, 0, axf->blocksize - klen); memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize); for (i = 0; i < axf->blocksize; i++) { s->hmac.ipad[i] ^= HMAC_IPAD_VAL; s->hmac.opad[i] ^= HMAC_OPAD_VAL; } /* * Hash the raw ipad and opad and store the partial result in * the same buffer. */ axf->Init(&auth_ctx); axf->Update(&auth_ctx, s->hmac.ipad, axf->blocksize); ccr_copy_partial_hash(s->hmac.ipad, cri_alg, &auth_ctx); axf->Init(&auth_ctx); axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize); ccr_copy_partial_hash(s->hmac.opad, cri_alg, &auth_ctx); } /* * Borrowed from AES_GMAC_Setkey(). 
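 * This computes the GHASH subkey H by encrypting an all-zero block
 * with the AES key, as GCM specifies.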
*/ static void ccr_init_gmac_hash(struct ccr_session *s, char *key, int klen) { static char zeroes[GMAC_BLOCK_LEN]; uint32_t keysched[4 * (RIJNDAEL_MAXNR + 1)]; int rounds; rounds = rijndaelKeySetupEnc(keysched, key, klen); rijndaelEncrypt(keysched, rounds, zeroes, s->gmac.ghash_h); } static int ccr_aes_check_keylen(int alg, int klen) { switch (klen) { case 128: case 192: if (alg == CRYPTO_AES_XTS) return (EINVAL); break; case 256: break; case 512: if (alg != CRYPTO_AES_XTS) return (EINVAL); break; default: return (EINVAL); } return (0); } static void ccr_aes_setkey(struct ccr_session *s, int alg, const void *key, int klen) { unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size; unsigned int opad_present; if (alg == CRYPTO_AES_XTS) kbits = klen / 2; else kbits = klen; switch (kbits) { case 128: ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; break; case 192: ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; break; case 256: ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; break; default: panic("should not get here"); } s->blkcipher.key_len = klen / 8; memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len); switch (alg) { case CRYPTO_AES_CBC: case CRYPTO_AES_XTS: t4_aes_getdeckey(s->blkcipher.deckey, key, kbits); break; } kctx_len = roundup2(s->blkcipher.key_len, 16); switch (s->mode) { case AUTHENC: mk_size = s->hmac.mk_size; opad_present = 1; iopad_size = roundup2(s->hmac.partial_digest_len, 16); kctx_len += iopad_size * 2; break; case GCM: mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; opad_present = 0; kctx_len += GMAC_BLOCK_LEN; break; default: mk_size = CHCR_KEYCTX_NO_KEY; opad_present = 0; break; } kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | V_KEY_CONTEXT_DUAL_CK(alg == CRYPTO_AES_XTS) | V_KEY_CONTEXT_OPAD_PRESENT(opad_present) | V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) | V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1)); } static int -ccr_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri) +ccr_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri) { struct ccr_softc *sc; struct ccr_session *s; struct auth_hash *auth_hash; struct cryptoini *c, *hash, *cipher; unsigned int auth_mode, cipher_mode, iv_len, mk_size; unsigned int partial_digest_len; - int error, i, sess; + int error; bool gcm_hash; - if (sidp == NULL || cri == NULL) + if (cri == NULL) return (EINVAL); gcm_hash = false; cipher = NULL; hash = NULL; auth_hash = NULL; auth_mode = CHCR_SCMD_AUTH_MODE_NOP; cipher_mode = CHCR_SCMD_CIPHER_MODE_NOP; iv_len = 0; mk_size = 0; partial_digest_len = 0; for (c = cri; c != NULL; c = c->cri_next) { switch (c->cri_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: if (hash) return (EINVAL); hash = c; switch (c->cri_alg) { case CRYPTO_SHA1_HMAC: auth_hash = &auth_hash_hmac_sha1; auth_mode = CHCR_SCMD_AUTH_MODE_SHA1; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; partial_digest_len = SHA1_HASH_LEN; break; case CRYPTO_SHA2_256_HMAC: auth_hash = &auth_hash_hmac_sha2_256; auth_mode = CHCR_SCMD_AUTH_MODE_SHA256; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; partial_digest_len = SHA2_256_HASH_LEN; break; case CRYPTO_SHA2_384_HMAC: auth_hash = &auth_hash_hmac_sha2_384; auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; partial_digest_len = SHA2_512_HASH_LEN; break; case CRYPTO_SHA2_512_HMAC: 
auth_hash = &auth_hash_hmac_sha2_512; auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; partial_digest_len = SHA2_512_HASH_LEN; break; case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: gcm_hash = true; auth_mode = CHCR_SCMD_AUTH_MODE_GHASH; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; break; } break; case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_XTS: if (cipher) return (EINVAL); cipher = c; switch (c->cri_alg) { case CRYPTO_AES_CBC: cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; iv_len = AES_BLOCK_LEN; break; case CRYPTO_AES_ICM: cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; iv_len = AES_BLOCK_LEN; break; case CRYPTO_AES_NIST_GCM_16: cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_GCM; iv_len = AES_GCM_IV_LEN; break; case CRYPTO_AES_XTS: cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; iv_len = AES_BLOCK_LEN; break; } if (c->cri_key != NULL) { error = ccr_aes_check_keylen(c->cri_alg, c->cri_klen); if (error) return (error); } break; default: return (EINVAL); } } if (gcm_hash != (cipher_mode == CHCR_SCMD_CIPHER_MODE_AES_GCM)) return (EINVAL); if (hash == NULL && cipher == NULL) return (EINVAL); if (hash != NULL && hash->cri_key == NULL) return (EINVAL); sc = device_get_softc(dev); mtx_lock(&sc->lock); if (sc->detaching) { mtx_unlock(&sc->lock); return (ENXIO); } - sess = -1; - for (i = 0; i < sc->nsessions; i++) { - if (!sc->sessions[i].active && sc->sessions[i].pending == 0) { - sess = i; - break; - } - } - if (sess == -1) { - s = malloc(sizeof(*s) * (sc->nsessions + 1), M_CCR, - M_NOWAIT | M_ZERO); - if (s == NULL) { - mtx_unlock(&sc->lock); - return (ENOMEM); - } - if (sc->sessions != NULL) - memcpy(s, sc->sessions, sizeof(*s) * sc->nsessions); - sess = sc->nsessions; - free(sc->sessions, M_CCR); - sc->sessions = s; - sc->nsessions++; - } - s = &sc->sessions[sess]; + s = crypto_get_driver_session(cses); if (gcm_hash) s->mode = GCM; else if (hash != NULL && cipher != NULL) s->mode = AUTHENC; else if (hash != NULL) s->mode = HMAC; else { MPASS(cipher != NULL); s->mode = BLKCIPHER; } if (gcm_hash) { if (hash->cri_mlen == 0) s->gmac.hash_len = AES_GMAC_HASH_LEN; else s->gmac.hash_len = hash->cri_mlen; ccr_init_gmac_hash(s, hash->cri_key, hash->cri_klen); } else if (hash != NULL) { s->hmac.auth_hash = auth_hash; s->hmac.auth_mode = auth_mode; s->hmac.mk_size = mk_size; s->hmac.partial_digest_len = partial_digest_len; if (hash->cri_mlen == 0) s->hmac.hash_len = auth_hash->hashsize; else s->hmac.hash_len = hash->cri_mlen; ccr_init_hmac_digest(s, hash->cri_alg, hash->cri_key, hash->cri_klen); } if (cipher != NULL) { s->blkcipher.cipher_mode = cipher_mode; s->blkcipher.iv_len = iv_len; if (cipher->cri_key != NULL) ccr_aes_setkey(s, cipher->cri_alg, cipher->cri_key, cipher->cri_klen); } s->active = true; mtx_unlock(&sc->lock); - - *sidp = sess; return (0); } -static int -ccr_freesession(device_t dev, uint64_t tid) +static void +ccr_freesession(device_t dev, crypto_session_t cses) { struct ccr_softc *sc; - uint32_t sid; - int error; + struct ccr_session *s; sc = device_get_softc(dev); - sid = CRYPTO_SESID2LID(tid); + s = crypto_get_driver_session(cses); mtx_lock(&sc->lock); - if (sid >= sc->nsessions || !sc->sessions[sid].active) - error = EINVAL; - else { - if (sc->sessions[sid].pending != 0) - device_printf(dev, - "session %d freed with %d pending requests\n", sid, - sc->sessions[sid].pending); - sc->sessions[sid].active = false; - error = 0; - } + if (s->pending != 0) + device_printf(dev, + "session 
%p freed with %d pending requests\n", s, + s->pending); + s->active = false; mtx_unlock(&sc->lock); - return (error); } static int ccr_process(device_t dev, struct cryptop *crp, int hint) { struct ccr_softc *sc; struct ccr_session *s; struct cryptodesc *crd, *crda, *crde; - uint32_t sid; int error; if (crp == NULL) return (EINVAL); crd = crp->crp_desc; - sid = CRYPTO_SESID2LID(crp->crp_sid); + s = crypto_get_driver_session(crp->crp_session); sc = device_get_softc(dev); - mtx_lock(&sc->lock); - if (sid >= sc->nsessions || !sc->sessions[sid].active) { - sc->stats_bad_session++; - error = EINVAL; - goto out; - } + mtx_lock(&sc->lock); error = ccr_populate_sglist(sc->sg_crp, crp); if (error) { sc->stats_sglist_error++; goto out; } - s = &sc->sessions[sid]; switch (s->mode) { case HMAC: if (crd->crd_flags & CRD_F_KEY_EXPLICIT) ccr_init_hmac_digest(s, crd->crd_alg, crd->crd_key, crd->crd_klen); - error = ccr_hmac(sc, sid, s, crp); + error = ccr_hmac(sc, s, crp); if (error == 0) sc->stats_hmac++; break; case BLKCIPHER: if (crd->crd_flags & CRD_F_KEY_EXPLICIT) { error = ccr_aes_check_keylen(crd->crd_alg, crd->crd_klen); if (error) break; ccr_aes_setkey(s, crd->crd_alg, crd->crd_key, crd->crd_klen); } - error = ccr_blkcipher(sc, sid, s, crp); + error = ccr_blkcipher(sc, s, crp); if (error == 0) { if (crd->crd_flags & CRD_F_ENCRYPT) sc->stats_blkcipher_encrypt++; else sc->stats_blkcipher_decrypt++; } break; case AUTHENC: error = 0; switch (crd->crd_alg) { case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: case CRYPTO_AES_XTS: /* Only encrypt-then-authenticate supported. */ crde = crd; crda = crd->crd_next; if (!(crde->crd_flags & CRD_F_ENCRYPT)) { error = EINVAL; break; } break; default: crda = crd; crde = crd->crd_next; if (crde->crd_flags & CRD_F_ENCRYPT) { error = EINVAL; break; } break; } if (error) break; if (crda->crd_flags & CRD_F_KEY_EXPLICIT) ccr_init_hmac_digest(s, crda->crd_alg, crda->crd_key, crda->crd_klen); if (crde->crd_flags & CRD_F_KEY_EXPLICIT) { error = ccr_aes_check_keylen(crde->crd_alg, crde->crd_klen); if (error) break; ccr_aes_setkey(s, crde->crd_alg, crde->crd_key, crde->crd_klen); } - error = ccr_authenc(sc, sid, s, crp, crda, crde); + error = ccr_authenc(sc, s, crp, crda, crde); if (error == 0) { if (crde->crd_flags & CRD_F_ENCRYPT) sc->stats_authenc_encrypt++; else sc->stats_authenc_decrypt++; } break; case GCM: error = 0; if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) { crde = crd; crda = crd->crd_next; } else { crda = crd; crde = crd->crd_next; } if (crda->crd_flags & CRD_F_KEY_EXPLICIT) ccr_init_gmac_hash(s, crda->crd_key, crda->crd_klen); if (crde->crd_flags & CRD_F_KEY_EXPLICIT) { error = ccr_aes_check_keylen(crde->crd_alg, crde->crd_klen); if (error) break; ccr_aes_setkey(s, crde->crd_alg, crde->crd_key, crde->crd_klen); } if (crde->crd_len == 0) { mtx_unlock(&sc->lock); ccr_gcm_soft(s, crp, crda, crde); return (0); } - error = ccr_gcm(sc, sid, s, crp, crda, crde); + error = ccr_gcm(sc, s, crp, crda, crde); if (error == EMSGSIZE) { sc->stats_sw_fallback++; mtx_unlock(&sc->lock); ccr_gcm_soft(s, crp, crda, crde); return (0); } if (error == 0) { if (crde->crd_flags & CRD_F_ENCRYPT) sc->stats_gcm_encrypt++; else sc->stats_gcm_decrypt++; } break; } if (error == 0) { s->pending++; sc->stats_inflight++; } else sc->stats_process_error++; out: mtx_unlock(&sc->lock); if (error) { crp->crp_etype = error; crypto_done(crp); } return (0); } static int do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct ccr_softc *sc = iq->adapter->ccr_softc; struct 
ccr_session *s; const struct cpl_fw6_pld *cpl; struct cryptop *crp; - uint32_t sid, status; + uint32_t status; int error; if (m != NULL) cpl = mtod(m, const void *); else cpl = (const void *)(rss + 1); crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]); - sid = CRYPTO_SESID2LID(crp->crp_sid); + s = crypto_get_driver_session(crp->crp_session); status = be64toh(cpl->data[0]); if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status)) error = EBADMSG; else error = 0; mtx_lock(&sc->lock); - MPASS(sid < sc->nsessions); - s = &sc->sessions[sid]; s->pending--; sc->stats_inflight--; switch (s->mode) { case HMAC: error = ccr_hmac_done(sc, s, crp, cpl, error); break; case BLKCIPHER: error = ccr_blkcipher_done(sc, s, crp, cpl, error); break; case AUTHENC: error = ccr_authenc_done(sc, s, crp, cpl, error); break; case GCM: error = ccr_gcm_done(sc, s, crp, cpl, error); break; } if (error == EBADMSG) { if (CHK_MAC_ERR_BIT(status)) sc->stats_mac_error++; if (CHK_PAD_ERR_BIT(status)) sc->stats_pad_error++; } mtx_unlock(&sc->lock); crp->crp_etype = error; crypto_done(crp); m_freem(m); return (0); } static int ccr_modevent(module_t mod, int cmd, void *arg) { switch (cmd) { case MOD_LOAD: t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld); return (0); case MOD_UNLOAD: t4_register_cpl_handler(CPL_FW6_PLD, NULL); return (0); default: return (EOPNOTSUPP); } } static device_method_t ccr_methods[] = { DEVMETHOD(device_identify, ccr_identify), DEVMETHOD(device_probe, ccr_probe), DEVMETHOD(device_attach, ccr_attach), DEVMETHOD(device_detach, ccr_detach), DEVMETHOD(cryptodev_newsession, ccr_newsession), DEVMETHOD(cryptodev_freesession, ccr_freesession), DEVMETHOD(cryptodev_process, ccr_process), DEVMETHOD_END }; static driver_t ccr_driver = { "ccr", ccr_methods, sizeof(struct ccr_softc) }; static devclass_t ccr_devclass; DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL); MODULE_VERSION(ccr, 1); MODULE_DEPEND(ccr, crypto, 1, 1, 1); MODULE_DEPEND(ccr, t6nex, 1, 1, 1); Index: head/sys/dev/hifn/hifn7751.c =================================================================== --- head/sys/dev/hifn/hifn7751.c (revision 336438) +++ head/sys/dev/hifn/hifn7751.c (revision 336439) @@ -1,2932 +1,2863 @@ /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Invertex AEON / Hifn 7751 driver * Copyright (c) 1999 Invertex Inc. All rights reserved. * Copyright (c) 1999 Theo de Raadt * Copyright (c) 2000-2001 Network Security Technologies, Inc. * http://www.netsec.net * Copyright (c) 2003 Hifn Inc. * * This driver is based on a previous driver by Invertex, for which they * requested: Please send any comments, feedback, bug-fixes, or feature * requests to software@invertex.com. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. */ #include __FBSDID("$FreeBSD$"); /* * Driver for various Hifn encryption processors. */ #include "opt_hifn.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #ifdef HIFN_RNDTEST #include #endif #include #include #ifdef HIFN_VULCANDEV #include #include static struct cdevsw vulcanpk_cdevsw; /* forward declaration */ #endif /* * Prototypes and count for the pci_device structure */ static int hifn_probe(device_t); static int hifn_attach(device_t); static int hifn_detach(device_t); static int hifn_suspend(device_t); static int hifn_resume(device_t); static int hifn_shutdown(device_t); -static int hifn_newsession(device_t, u_int32_t *, struct cryptoini *); -static int hifn_freesession(device_t, u_int64_t); +static int hifn_newsession(device_t, crypto_session_t, struct cryptoini *); static int hifn_process(device_t, struct cryptop *, int); static device_method_t hifn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hifn_probe), DEVMETHOD(device_attach, hifn_attach), DEVMETHOD(device_detach, hifn_detach), DEVMETHOD(device_suspend, hifn_suspend), DEVMETHOD(device_resume, hifn_resume), DEVMETHOD(device_shutdown, hifn_shutdown), /* crypto device methods */ DEVMETHOD(cryptodev_newsession, hifn_newsession), - DEVMETHOD(cryptodev_freesession,hifn_freesession), DEVMETHOD(cryptodev_process, hifn_process), DEVMETHOD_END }; static driver_t hifn_driver = { "hifn", hifn_methods, sizeof (struct hifn_softc) }; static devclass_t hifn_devclass; DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0); MODULE_DEPEND(hifn, crypto, 1, 1, 1); #ifdef HIFN_RNDTEST MODULE_DEPEND(hifn, rndtest, 1, 1, 1); #endif static void hifn_reset_board(struct hifn_softc *, int); static void hifn_reset_puc(struct hifn_softc *); static void hifn_puc_wait(struct hifn_softc *); static int hifn_enable_crypto(struct hifn_softc *); static void hifn_set_retry(struct hifn_softc *sc); static void hifn_init_dma(struct hifn_softc *); static void hifn_init_pci_registers(struct hifn_softc *); static int hifn_sramsize(struct hifn_softc *); static int hifn_dramsize(struct hifn_softc *); static int hifn_ramtype(struct hifn_softc *); static void hifn_sessions(struct hifn_softc *); static void hifn_intr(void *); static u_int hifn_write_command(struct hifn_command *, u_int8_t *); static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt); static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *); static int hifn_crypto(struct 
hifn_softc *, struct hifn_command *, struct cryptop *, int); static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *); static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *); static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *); static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *); static int hifn_init_pubrng(struct hifn_softc *); static void hifn_rng(void *); static void hifn_tick(void *); static void hifn_abort(struct hifn_softc *); static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *); static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t); static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t); static __inline u_int32_t READ_REG_0(struct hifn_softc *sc, bus_size_t reg) { u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg); sc->sc_bar0_lastreg = (bus_size_t) -1; return (v); } #define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val) static __inline u_int32_t READ_REG_1(struct hifn_softc *sc, bus_size_t reg) { u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg); sc->sc_bar1_lastreg = (bus_size_t) -1; return (v); } #define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val) static SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters"); #ifdef HIFN_DEBUG static int hifn_debug = 0; SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug, 0, "control debugging msgs"); #endif static struct hifn_stats hifnstats; SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats, hifn_stats, "driver statistics"); static int hifn_maxbatch = 1; SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch, 0, "max ops to batch w/o interrupt"); /* * Probe for a supported device. The PCI vendor and device * IDs are used to detect devices we know how to handle. 
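 * The parts recognized below are the Invertex AEON, the Hifn
 * 7751/7951/7955/7956/7811, and the NetSec 7751.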
*/ static int hifn_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX && pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON) return (BUS_PROBE_DEFAULT); if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)) return (BUS_PROBE_DEFAULT); if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751) return (BUS_PROBE_DEFAULT); return (ENXIO); } static void hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } static const char* hifn_partname(struct hifn_softc *sc) { /* XXX sprintf numbers when not decoded */ switch (pci_get_vendor(sc->sc_dev)) { case PCI_VENDOR_HIFN: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_HIFN_6500: return "Hifn 6500"; case PCI_PRODUCT_HIFN_7751: return "Hifn 7751"; case PCI_PRODUCT_HIFN_7811: return "Hifn 7811"; case PCI_PRODUCT_HIFN_7951: return "Hifn 7951"; case PCI_PRODUCT_HIFN_7955: return "Hifn 7955"; case PCI_PRODUCT_HIFN_7956: return "Hifn 7956"; } return "Hifn unknown-part"; case PCI_VENDOR_INVERTEX: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON"; } return "Invertex unknown-part"; case PCI_VENDOR_NETSEC: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751"; } return "NetSec unknown-part"; } return "Unknown-vendor unknown-part"; } static void default_harvest(struct rndtest_state *rsp, void *buf, u_int count) { /* MarkM: FIX!! Check that this does not swamp the harvester! */ random_harvest_queue(buf, count, count*NBBY/2, RANDOM_PURE_HIFN); } static u_int checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max) { if (v > max) { device_printf(dev, "Warning, %s %u out of range, " "using max %u\n", what, v, max); v = max; } else if (v < min) { device_printf(dev, "Warning, %s %u out of range, " "using min %u\n", what, v, min); v = min; } return v; } /* * Select PLL configuration for 795x parts. This is complicated in * that we cannot determine the optimal parameters without user input. * The reference clock is derived from an external clock through a * multiplier. The external clock is either the host bus (i.e. PCI) * or an external clock generator. When using the PCI bus we assume * the clock is either 33 or 66 MHz; for an external source we cannot * tell the speed. * * PLL configuration is done with a string: "pci" for PCI bus, or "ext" * for an external source, followed by the frequency. We calculate * the appropriate multiplier and PLL register contents accordingly. * When no configuration is given we default to "pci66" since that * always will allow the card to work. If a card is using the PCI * bus clock and in a 33MHz slot then it will be operating at half * speed until the correct information is provided. * * We use a default setting of "ext66" because according to Mike Ham * of HiFn, almost every board in existence has an external crystal * populated at 66Mhz. Using PCI can be a problem on modern motherboards, * because PCI33 can have clocks from 0 to 33Mhz, and some have * non-PCI-compliant spread-spectrum clocks, which can confuse the pll. 
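 *
 * As a worked example of the arithmetic below: the default "ext66"
 * gives freq = 66, so the multiplier is (266 / 66) & ~1 = 4, the ND
 * field is programmed with 4 / 2 - 1 = 1, and the charge pump bit
 * (HIFN_PLL_IS) stays clear because the multiplier does not exceed 8.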
*/ static void hifn_getpllconfig(device_t dev, u_int *pll) { const char *pllspec; u_int freq, mul, fl, fh; u_int32_t pllconfig; char *nxt; if (resource_string_value("hifn", device_get_unit(dev), "pllconfig", &pllspec)) pllspec = "ext66"; fl = 33, fh = 66; pllconfig = 0; if (strncmp(pllspec, "ext", 3) == 0) { pllspec += 3; pllconfig |= HIFN_PLL_REF_SEL; switch (pci_get_device(dev)) { case PCI_PRODUCT_HIFN_7955: case PCI_PRODUCT_HIFN_7956: fl = 20, fh = 100; break; #ifdef notyet case PCI_PRODUCT_HIFN_7954: fl = 20, fh = 66; break; #endif } } else if (strncmp(pllspec, "pci", 3) == 0) pllspec += 3; freq = strtoul(pllspec, &nxt, 10); if (nxt == pllspec) freq = 66; else freq = checkmaxmin(dev, "frequency", freq, fl, fh); /* * Calculate multiplier. We target a Fck of 266 MHz, * allowing only even values, possibly rounded down. * Multipliers > 8 must set the charge pump current. */ mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12); pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT; if (mul > 8) pllconfig |= HIFN_PLL_IS; *pll = pllconfig; } /* * Attach an interface that successfully probed. */ static int hifn_attach(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); caddr_t kva; int rseg, rid; char rbase; u_int16_t ena, rev; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF); /* XXX handle power management */ /* * The 7951 and 795x have a random number generator and * public key support; note this. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC; /* * The 7811 has a random number generator and * we also note it's identity 'cuz of some quirks. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && pci_get_device(dev) == PCI_PRODUCT_HIFN_7811) sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG; /* * The 795x parts support AES. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) { sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES; /* * Select PLL configuration. This depends on the * bus and board design and must be manually configured * if the default setting is unacceptable. */ hifn_getpllconfig(dev, &sc->sc_pllconfig); } /* * Setup PCI resources. Note that we record the bus * tag and handle for each register mapping, this is * used by the READ_REG_0, WRITE_REG_0, READ_REG_1, * and WRITE_REG_1 macros throughout the driver. */ pci_enable_busmaster(dev); rid = HIFN_BAR0; sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_bar0res == NULL) { device_printf(dev, "cannot map bar%d register space\n", 0); goto fail_pci; } sc->sc_st0 = rman_get_bustag(sc->sc_bar0res); sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res); sc->sc_bar0_lastreg = (bus_size_t) -1; rid = HIFN_BAR1; sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_bar1res == NULL) { device_printf(dev, "cannot map bar%d register space\n", 1); goto fail_io0; } sc->sc_st1 = rman_get_bustag(sc->sc_bar1res); sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res); sc->sc_bar1_lastreg = (bus_size_t) -1; hifn_set_retry(sc); /* * Setup the area where the Hifn DMA's descriptors * and associated data structures. 
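 * A single struct hifn_dma is allocated from a 32-bit DMA-able tag,
 * mapped with bus_dmamap_load(), and its bus address is recorded in
 * sc_dma_physaddr for programming the ring address registers later.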
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* PCI parent */ 1, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HIFN_MAX_DMALEN, /* maxsize */ MAX_SCATTER, /* nsegments */ HIFN_MAX_SEGLEN, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &sc->sc_dmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto fail_io1; } if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { device_printf(dev, "cannot create dma map\n"); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { device_printf(dev, "cannot alloc dma buffer\n"); bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva, sizeof (*sc->sc_dma), hifn_dmamap_cb, &sc->sc_dma_physaddr, BUS_DMA_NOWAIT)) { device_printf(dev, "cannot load dma map\n"); bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } sc->sc_dma = (struct hifn_dma *)kva; bzero(sc->sc_dma, sizeof(*sc->sc_dma)); KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!")); KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!")); KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!")); KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!")); /* * Reset the board and do the ``secret handshake'' * to enable the crypto support. Then complete the * initialization procedure by setting up the interrupt * and hooking in to the system crypto support so we'll * get used for system services like the crypto device, * IPsec, RNG device, etc. */ hifn_reset_board(sc, 0); if (hifn_enable_crypto(sc) != 0) { device_printf(dev, "crypto enabling failed\n"); goto fail_mem; } hifn_reset_puc(sc); hifn_init_dma(sc); hifn_init_pci_registers(sc); /* XXX can't dynamically determine ram type for 795x; force dram */ if (sc->sc_flags & HIFN_IS_7956) sc->sc_drammodel = 1; else if (hifn_ramtype(sc)) goto fail_mem; if (sc->sc_drammodel == 0) hifn_sramsize(sc); else hifn_dramsize(sc); /* * Workaround for NetSec 7751 rev A: half ram size because two * of the address lines were left floating */ if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 && pci_get_revid(dev) == 0x61) /*XXX???*/ sc->sc_ramsize >>= 1; /* * Arrange the interrupt line. */ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE|RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not map interrupt\n"); goto fail_mem; } /* * NB: Network code assumes we are blocked with splimp() * so make sure the IRQ is marked appropriately. */ if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, hifn_intr, sc, &sc->sc_intrhand)) { device_printf(dev, "could not setup interrupt\n"); goto fail_intr2; } hifn_sessions(sc); /* * NB: Keep only the low 16 bits; this masks the chip id * from the 7951. */ rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff; rseg = sc->sc_ramsize / 1024; rbase = 'K'; if (sc->sc_ramsize >= (1024 * 1024)) { rbase = 'M'; rseg /= 1024; } device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram", hifn_partname(sc), rev, rseg, rbase, sc->sc_drammodel ? 'd' : 's'); if (sc->sc_flags & HIFN_IS_7956) printf(", pll=0x%x<%s clk, %ux mult>", sc->sc_pllconfig, sc->sc_pllconfig & HIFN_PLL_REF_SEL ? 
"ext" : "pci", 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11)); printf("\n"); - sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); + sc->sc_cid = crypto_get_driverid(dev, sizeof(struct hifn_session), + CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto fail_intr; } WRITE_REG_0(sc, HIFN_0_PUCNFG, READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID); ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; switch (ena) { case HIFN_PUSTAT_ENA_2: crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0); if (sc->sc_flags & HIFN_HAS_AES) crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); /*FALLTHROUGH*/ case HIFN_PUSTAT_ENA_1: crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0); crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); break; } bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) hifn_init_pubrng(sc); callout_init(&sc->sc_tickto, 1); callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); return (0); fail_intr: bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); fail_intr2: /* XXX don't store rid */ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); fail_mem: bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); /* Turn off DMA polling */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); fail_io1: bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); fail_io0: bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); fail_pci: mtx_destroy(&sc->sc_mtx); return (ENXIO); } /* * Detach an interface that successfully probed. */ static int hifn_detach(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); KASSERT(sc != NULL, ("hifn_detach: null software carrier!")); /* disable interrupts */ WRITE_REG_1(sc, HIFN_1_DMA_IER, 0); /*XXX other resources */ callout_stop(&sc->sc_tickto); callout_stop(&sc->sc_rngto); #ifdef HIFN_RNDTEST if (sc->sc_rndtest) rndtest_detach(sc->sc_rndtest); #endif /* Turn off DMA polling */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); crypto_unregister_all(sc->sc_cid); bus_generic_detach(dev); /*XXX should be no children, right? */ bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); /* XXX don't store rid */ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); mtx_destroy(&sc->sc_mtx); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int hifn_shutdown(device_t dev) { #ifdef notyet hifn_stop(device_get_softc(dev)); #endif return (0); } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. 
*/ static int hifn_suspend(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); #ifdef notyet hifn_stop(sc); #endif sc->sc_suspended = 1; return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int hifn_resume(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); #ifdef notyet /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) rl_init(sc); #endif sc->sc_suspended = 0; return (0); } static int hifn_init_pubrng(struct hifn_softc *sc) { u_int32_t r; int i; #ifdef HIFN_RNDTEST sc->sc_rndtest = rndtest_attach(sc->sc_dev); if (sc->sc_rndtest) sc->sc_harvest = rndtest_harvest; else sc->sc_harvest = default_harvest; #else sc->sc_harvest = default_harvest; #endif if ((sc->sc_flags & HIFN_IS_7811) == 0) { /* Reset 7951 public key/rng engine */ WRITE_REG_1(sc, HIFN_1_PUB_RESET, READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET); for (i = 0; i < 100; i++) { DELAY(1000); if ((READ_REG_1(sc, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0) break; } if (i == 100) { device_printf(sc->sc_dev, "public key init failed\n"); return (1); } } /* Enable the rng, if available */ if (sc->sc_flags & HIFN_HAS_RNG) { if (sc->sc_flags & HIFN_IS_7811) { r = READ_REG_1(sc, HIFN_1_7811_RNGENA); if (r & HIFN_7811_RNGENA_ENA) { r &= ~HIFN_7811_RNGENA_ENA; WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); } WRITE_REG_1(sc, HIFN_1_7811_RNGCFG, HIFN_7811_RNGCFG_DEFL); r |= HIFN_7811_RNGENA_ENA; WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); } else WRITE_REG_1(sc, HIFN_1_RNG_CONFIG, READ_REG_1(sc, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA); sc->sc_rngfirst = 1; if (hz >= 100) sc->sc_rnghz = hz / 100; else sc->sc_rnghz = 1; callout_init(&sc->sc_rngto, 1); callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); } /* Enable public key engine, if available */ if (sc->sc_flags & HIFN_HAS_PUBLIC) { WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); sc->sc_dmaier |= HIFN_DMAIER_PUBDONE; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); #ifdef HIFN_VULCANDEV sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "vulcanpk"); sc->sc_pkdev->si_drv1 = sc; #endif } return (0); } static void hifn_rng(void *vsc) { #define RANDOM_BITS(n) (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0 struct hifn_softc *sc = vsc; u_int32_t sts, num[2]; int i; if (sc->sc_flags & HIFN_IS_7811) { /* ONLY VALID ON 7811!!!! */ for (i = 0; i < 5; i++) { sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS); if (sts & HIFN_7811_RNGSTS_UFL) { device_printf(sc->sc_dev, "RNG underflow: disabling\n"); return; } if ((sts & HIFN_7811_RNGSTS_RDY) == 0) break; /* * There are at least two words in the RNG FIFO * at this point. 
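 * Both words are pulled from HIFN_1_7811_RNGDAT in one pass; the very
 * first data read after enabling the RNG is discarded rather than
 * handed to the harvester.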
*/ num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); /* NB: discard first data read */ if (sc->sc_rngfirst) sc->sc_rngfirst = 0; else (*sc->sc_harvest)(sc->sc_rndtest, num, sizeof (num)); } } else { num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA); /* NB: discard first data read */ if (sc->sc_rngfirst) sc->sc_rngfirst = 0; else (*sc->sc_harvest)(sc->sc_rndtest, num, sizeof (num[0])); } callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); #undef RANDOM_BITS } static void hifn_puc_wait(struct hifn_softc *sc) { int i; int reg = HIFN_0_PUCTRL; if (sc->sc_flags & HIFN_IS_7956) { reg = HIFN_0_PUCTRL2; } for (i = 5000; i > 0; i--) { DELAY(1); if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET)) break; } if (!i) device_printf(sc->sc_dev, "proc unit did not reset\n"); } /* * Reset the processing unit. */ static void hifn_reset_puc(struct hifn_softc *sc) { /* Reset processing unit */ int reg = HIFN_0_PUCTRL; if (sc->sc_flags & HIFN_IS_7956) { reg = HIFN_0_PUCTRL2; } WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA); hifn_puc_wait(sc); } /* * Set the Retry and TRDY registers; note that we set them to * zero because the 7811 locks up when forced to retry (section * 3.6 of "Specification Update SU-0014-04". Not clear if we * should do this for all Hifn parts, but it doesn't seem to hurt. */ static void hifn_set_retry(struct hifn_softc *sc) { /* NB: RETRY only responds to 8-bit reads/writes */ pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1); pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 1); } /* * Resets the board. Values in the regesters are left as is * from the reset (i.e. initial values are assigned elsewhere). */ static void hifn_reset_board(struct hifn_softc *sc, int full) { u_int32_t reg; /* * Set polling in the DMA configuration register to zero. 0x7 avoids * resetting the board and zeros out the other fields. */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); /* * Now that polling has been disabled, we have to wait 1 ms * before resetting the board. 
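 * The wait is implemented with DELAY(1000), i.e. 1000 microseconds.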
*/ DELAY(1000); /* Reset the DMA unit */ if (full) { WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); DELAY(1000); } else { WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET); hifn_reset_puc(sc); } KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!")); bzero(sc->sc_dma, sizeof(*sc->sc_dma)); /* Bring dma unit out of reset */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); hifn_puc_wait(sc); hifn_set_retry(sc); if (sc->sc_flags & HIFN_IS_7811) { for (reg = 0; reg < 1000; reg++) { if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) & HIFN_MIPSRST_CRAMINIT) break; DELAY(1000); } if (reg == 1000) printf(": cram init timeout\n"); } else { /* set up DMA configuration register #2 */ /* turn off all PK and BAR0 swaps */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG2, (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)| (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)| (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)| (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT)); } } static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt) { int i; u_int32_t v; for (i = 0; i < cnt; i++) { /* get the parity */ v = a & 0x80080125; v ^= v >> 16; v ^= v >> 8; v ^= v >> 4; v ^= v >> 2; v ^= v >> 1; a = (v & 1) ^ (a << 1); } return a; } struct pci2id { u_short pci_vendor; u_short pci_prod; char card_id[13]; }; static struct pci2id pci2id[] = { { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { /* * Other vendors share this PCI ID as well, such as * http://www.powercrypt.com, and obviously they also * use the same key. */ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, }; /* * Checks to see if crypto is already enabled. If crypto isn't enable, * "hifn_enable_crypto" is called to enable it. The check is important, * as enabling crypto twice will lock the board. */ static int hifn_enable_crypto(struct hifn_softc *sc) { u_int32_t dmacfg, ramcfg, encl, addr, i; char *offtbl = NULL; for (i = 0; i < nitems(pci2id); i++) { if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) && pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) { offtbl = pci2id[i].card_id; break; } } if (offtbl == NULL) { device_printf(sc->sc_dev, "Unknown card!\n"); return (1); } ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG); dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG); /* * The RAM config register's encrypt level bit needs to be set before * every read performed on the encryption level register. */ WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; /* * Make sure we don't re-unlock. Two unlocks kills chip until the * next reboot. 
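 * If the status already reads HIFN_PUSTAT_ENA_1 or HIFN_PUSTAT_ENA_2
 * the part is considered unlocked and the signature sequence below is
 * skipped entirely.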
*/ if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) { #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "Strong crypto already enabled!\n"); #endif goto report; } if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) { #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "Unknown encryption level 0x%x\n", encl); #endif return 1; } WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); DELAY(1000); addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1); DELAY(1000); WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0); DELAY(1000); for (i = 0; i <= 12; i++) { addr = hifn_next_signature(addr, offtbl[i] + 0x101); WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr); DELAY(1000); } WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; #ifdef HIFN_DEBUG if (hifn_debug) { if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2) device_printf(sc->sc_dev, "Engine is permanently " "locked until next system reset!\n"); else device_printf(sc->sc_dev, "Engine enabled " "successfully!\n"); } #endif report: WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg); WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg); switch (encl) { case HIFN_PUSTAT_ENA_1: case HIFN_PUSTAT_ENA_2: break; case HIFN_PUSTAT_ENA_0: default: device_printf(sc->sc_dev, "disabled"); break; } return 0; } /* * Give initial values to the registers listed in the "Register Space" * section of the HIFN Software Development reference manual. */ static void hifn_init_pci_registers(struct hifn_softc *sc) { /* write fixed values needed by the Initialization registers */ WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD); WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER); /* write all 4 ring address registers */ WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); DELAY(2000); /* write status register */ WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST | HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST | HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER | HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST | HIFN_DMACSR_S_WAIT | HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST | HIFN_DMACSR_C_WAIT | HIFN_DMACSR_ENGINE | ((sc->sc_flags & HIFN_HAS_PUBLIC) ? HIFN_DMACSR_PUBDONE : 0) | ((sc->sc_flags & HIFN_IS_7811) ? HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0)); sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0; sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT | HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER | HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT | ((sc->sc_flags & HIFN_IS_7811) ? 
HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0); sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); if (sc->sc_flags & HIFN_IS_7956) { u_int32_t pll; WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | HIFN_PUCNFG_TCALLPHASES | HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32); /* turn off the clocks and insure bypass is set */ pll = READ_REG_1(sc, HIFN_1_PLL); pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL)) | HIFN_PLL_BP | HIFN_PLL_MBSET; WRITE_REG_1(sc, HIFN_1_PLL, pll); DELAY(10*1000); /* 10ms */ /* change configuration */ pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig; WRITE_REG_1(sc, HIFN_1_PLL, pll); DELAY(10*1000); /* 10ms */ /* disable bypass */ pll &= ~HIFN_PLL_BP; WRITE_REG_1(sc, HIFN_1_PLL, pll); /* enable clocks with new configuration */ pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL; WRITE_REG_1(sc, HIFN_1_PLL, pll); } else { WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES | HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 | (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM)); } WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST | ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) | ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL)); } /* * The maximum number of sessions supported by the card * is dependent on the amount of context ram, which * encryption algorithms are enabled, and how compression * is configured. This should be configured before this * routine is called. */ static void hifn_sessions(struct hifn_softc *sc) { u_int32_t pucnfg; int ctxsize; pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG); if (pucnfg & HIFN_PUCNFG_COMPSING) { if (pucnfg & HIFN_PUCNFG_ENCCNFG) ctxsize = 128; else ctxsize = 512; /* * 7955/7956 has internal context memory of 32K */ if (sc->sc_flags & HIFN_IS_7956) sc->sc_maxses = 32768 / ctxsize; else sc->sc_maxses = 1 + ((sc->sc_ramsize - 32768) / ctxsize); } else sc->sc_maxses = sc->sc_ramsize / 16384; if (sc->sc_maxses > 2048) sc->sc_maxses = 2048; } /* * Determine ram type (sram or dram). Board should be just out of a reset * state when this is called. 
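 * The probe writes an 8-byte test pattern (first all 0x55, then all
 * 0xaa) to address 0 and reads it back; if either pattern fails to
 * round-trip, the board is assumed to carry DRAM (sc_drammodel = 1),
 * otherwise the SRAM sizing logic is used.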
*/ static int hifn_ramtype(struct hifn_softc *sc) { u_int8_t data[8], dataexpect[8]; int i; for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = 0x55; if (hifn_writeramaddr(sc, 0, data)) return (-1); if (hifn_readramaddr(sc, 0, data)) return (-1); if (bcmp(data, dataexpect, sizeof(data)) != 0) { sc->sc_drammodel = 1; return (0); } for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = 0xaa; if (hifn_writeramaddr(sc, 0, data)) return (-1); if (hifn_readramaddr(sc, 0, data)) return (-1); if (bcmp(data, dataexpect, sizeof(data)) != 0) { sc->sc_drammodel = 1; return (0); } return (0); } #define HIFN_SRAM_MAX (32 << 20) #define HIFN_SRAM_STEP_SIZE 16384 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE) static int hifn_sramsize(struct hifn_softc *sc) { u_int32_t a; u_int8_t data[8]; u_int8_t dataexpect[sizeof(data)]; int32_t i; for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = i ^ 0x5a; for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) { a = i * HIFN_SRAM_STEP_SIZE; bcopy(&i, data, sizeof(i)); hifn_writeramaddr(sc, a, data); } for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) { a = i * HIFN_SRAM_STEP_SIZE; bcopy(&i, dataexpect, sizeof(i)); if (hifn_readramaddr(sc, a, data) < 0) return (0); if (bcmp(data, dataexpect, sizeof(data)) != 0) return (0); sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE; } return (0); } /* * XXX For dram boards, one should really try all of the * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG * is already set up correctly. */ static int hifn_dramsize(struct hifn_softc *sc) { u_int32_t cnfg; if (sc->sc_flags & HIFN_IS_7956) { /* * 7955/7956 have a fixed internal ram of only 32K. */ sc->sc_ramsize = 32768; } else { cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) & HIFN_PUCNFG_DRAMMASK; sc->sc_ramsize = 1 << ((cnfg >> 13) + 18); } return (0); } static void hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp) { struct hifn_dma *dma = sc->sc_dma; if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) { sc->sc_cmdi = 0; dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *cmdp = sc->sc_cmdi++; sc->sc_cmdk = sc->sc_cmdi; if (sc->sc_srci == HIFN_D_SRC_RSIZE) { sc->sc_srci = 0; dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *srcp = sc->sc_srci++; sc->sc_srck = sc->sc_srci; if (sc->sc_dsti == HIFN_D_DST_RSIZE) { sc->sc_dsti = 0; dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *dstp = sc->sc_dsti++; sc->sc_dstk = sc->sc_dsti; if (sc->sc_resi == HIFN_D_RES_RSIZE) { sc->sc_resi = 0; dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *resp = sc->sc_resi++; sc->sc_resk = sc->sc_resi; } static int hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) { struct hifn_dma *dma = sc->sc_dma; hifn_base_command_t wc; const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; int r, cmdi, resi, srci, dsti; wc.masks = htole16(3 << 13); wc.session_num = htole16(addr >> 14); wc.total_source_count = htole16(8); wc.total_dest_count = htole16(addr & 0x3fff); hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); WRITE_REG_1(sc, HIFN_1_DMA_CSR, 
HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); /* build write command */ bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc; bcopy(data, &dma->test_src, sizeof(dma->test_src)); dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_src)); dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_dst)); dma->cmdr[cmdi].l = htole32(16 | masks); dma->srcr[srci].l = htole32(8 | masks); dma->dstr[dsti].l = htole32(4 | masks); dma->resr[resi].l = htole32(4 | masks); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); for (r = 10000; r >= 0; r--) { DELAY(10); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) break; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } if (r == 0) { device_printf(sc->sc_dev, "writeramaddr -- " "result[%d](addr %d) still valid\n", resi, addr); r = -1; return (-1); } else r = 0; WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); return (r); } static int hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) { struct hifn_dma *dma = sc->sc_dma; hifn_base_command_t rc; const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; int r, cmdi, srci, dsti, resi; rc.masks = htole16(2 << 13); rc.session_num = htole16(addr >> 14); rc.total_source_count = htole16(addr & 0x3fff); rc.total_dest_count = htole16(8); hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc; dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_src)); dma->test_src = 0; dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_dst)); dma->test_dst = 0; dma->cmdr[cmdi].l = htole32(8 | masks); dma->srcr[srci].l = htole32(8 | masks); dma->dstr[dsti].l = htole32(8 | masks); dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); for (r = 10000; r >= 0; r--) { DELAY(10); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) break; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } if (r == 0) { device_printf(sc->sc_dev, "readramaddr -- " "result[%d](addr %d) still valid\n", resi, addr); r = -1; } else { r = 0; bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); } WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); return (r); } /* * Initialize the descriptor rings. 
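 * Command and result descriptors are pointed at their fixed per-slot
 * buffers, and the extra descriptor at the end of each ring is pointed
 * back at the ring base so it can later be marked as a jump when the
 * ring wraps.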
*/ static void hifn_init_dma(struct hifn_softc *sc) { struct hifn_dma *dma = sc->sc_dma; int i; hifn_set_retry(sc); /* initialize static pointer values */ for (i = 0; i < HIFN_D_CMD_RSIZE; i++) dma->cmdr[i].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, command_bufs[i][0])); for (i = 0; i < HIFN_D_RES_RSIZE; i++) dma->resr[i].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, result_bufs[i][0])); dma->cmdr[HIFN_D_CMD_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); dma->srcr[HIFN_D_SRC_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); dma->dstr[HIFN_D_DST_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); dma->resr[HIFN_D_RES_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); sc->sc_cmdu = sc->sc_srcu = sc->sc_dstu = sc->sc_resu = 0; sc->sc_cmdi = sc->sc_srci = sc->sc_dsti = sc->sc_resi = 0; sc->sc_cmdk = sc->sc_srck = sc->sc_dstk = sc->sc_resk = 0; } /* * Writes out the raw command buffer space. Returns the * command buffer size. */ static u_int hifn_write_command(struct hifn_command *cmd, u_int8_t *buf) { u_int8_t *buf_pos; hifn_base_command_t *base_cmd; hifn_mac_command_t *mac_cmd; hifn_crypt_command_t *cry_cmd; int using_mac, using_crypt, len, ivlen; u_int32_t dlen, slen; buf_pos = buf; using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC; using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT; base_cmd = (hifn_base_command_t *)buf_pos; base_cmd->masks = htole16(cmd->base_masks); slen = cmd->src_mapsize; if (cmd->sloplen) dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t); else dlen = cmd->dst_mapsize; base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO); base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO); dlen >>= 16; slen >>= 16; base_cmd->session_num = htole16( ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) | ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M)); buf_pos += sizeof(hifn_base_command_t); if (using_mac) { mac_cmd = (hifn_mac_command_t *)buf_pos; dlen = cmd->maccrd->crd_len; mac_cmd->source_count = htole16(dlen & 0xffff); dlen >>= 16; mac_cmd->masks = htole16(cmd->mac_masks | ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M)); mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip); mac_cmd->reserved = 0; buf_pos += sizeof(hifn_mac_command_t); } if (using_crypt) { cry_cmd = (hifn_crypt_command_t *)buf_pos; dlen = cmd->enccrd->crd_len; cry_cmd->source_count = htole16(dlen & 0xffff); dlen >>= 16; cry_cmd->masks = htole16(cmd->cry_masks | ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M)); cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip); cry_cmd->reserved = 0; buf_pos += sizeof(hifn_crypt_command_t); } if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) { bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH); buf_pos += HIFN_MAC_KEY_LENGTH; } if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) { switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { case HIFN_CRYPT_CMD_ALG_3DES: bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH); buf_pos += HIFN_3DES_KEY_LENGTH; break; case HIFN_CRYPT_CMD_ALG_DES: bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH); buf_pos += HIFN_DES_KEY_LENGTH; break; case HIFN_CRYPT_CMD_ALG_RC4: len = 256; do { int clen; clen = MIN(cmd->cklen, len); bcopy(cmd->ck, buf_pos, clen); len -= clen; buf_pos += clen; } while (len > 0); bzero(buf_pos, 4); buf_pos += 4; break; case HIFN_CRYPT_CMD_ALG_AES: /* * AES keys are variable 128, 192 and * 256 bits (16, 
24 and 32 bytes). */ bcopy(cmd->ck, buf_pos, cmd->cklen); buf_pos += cmd->cklen; break; } } if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) { switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { case HIFN_CRYPT_CMD_ALG_AES: ivlen = HIFN_AES_IV_LENGTH; break; default: ivlen = HIFN_IV_LENGTH; break; } bcopy(cmd->iv, buf_pos, ivlen); buf_pos += ivlen; } if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) { bzero(buf_pos, 8); buf_pos += 8; } return (buf_pos - buf); } static int hifn_dmamap_aligned(struct hifn_operand *op) { int i; for (i = 0; i < op->nsegs; i++) { if (op->segs[i].ds_addr & 3) return (0); if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3)) return (0); } return (1); } static __inline int hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx) { struct hifn_dma *dma = sc->sc_dma; if (++idx == HIFN_D_DST_RSIZE) { dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); idx = 0; } return (idx); } static int hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd) { struct hifn_dma *dma = sc->sc_dma; struct hifn_operand *dst = &cmd->dst; u_int32_t p, l; int idx, used = 0, i; idx = sc->sc_dsti; for (i = 0; i < dst->nsegs - 1; i++) { dma->dstr[idx].p = htole32(dst->segs[i].ds_addr); dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; idx = hifn_dmamap_dstwrap(sc, idx); } if (cmd->sloplen == 0) { p = dst->segs[i].ds_addr; l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | dst->segs[i].ds_len; } else { p = sc->sc_dma_physaddr + offsetof(struct hifn_dma, slop[cmd->slopidx]); l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | sizeof(u_int32_t); if ((dst->segs[i].ds_len - cmd->sloplen) != 0) { dma->dstr[idx].p = htole32(dst->segs[i].ds_addr); dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_MASKDONEIRQ | (dst->segs[i].ds_len - cmd->sloplen)); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; idx = hifn_dmamap_dstwrap(sc, idx); } } dma->dstr[idx].p = htole32(p); dma->dstr[idx].l = htole32(l); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; idx = hifn_dmamap_dstwrap(sc, idx); sc->sc_dsti = idx; sc->sc_dstu += used; return (idx); } static __inline int hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx) { struct hifn_dma *dma = sc->sc_dma; if (++idx == HIFN_D_SRC_RSIZE) { dma->srcr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); idx = 0; } return (idx); } static int hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd) { struct hifn_dma *dma = sc->sc_dma; struct hifn_operand *src = &cmd->src; int idx, i; u_int32_t last = 0; idx = sc->sc_srci; for (i = 0; i < src->nsegs; i++) { if (i == src->nsegs - 1) last = HIFN_D_LAST; dma->srcr[idx].p = htole32(src->segs[i].ds_addr); dma->srcr[idx].l = htole32(src->segs[i].ds_len | HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last); HIFN_SRCR_SYNC(sc, idx, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); idx = hifn_dmamap_srcwrap(sc, idx); } sc->sc_srci = idx; sc->sc_srcu += src->nsegs; return (idx); } static void hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error) { struct hifn_operand *op = arg; KASSERT(nsegs <= MAX_SCATTER, ("hifn_op_cb: too many DMA segments (%u > %u) " "returned when mapping operand", nsegs, 
MAX_SCATTER)); op->mapsize = mapsize; op->nsegs = nsegs; bcopy(seg, op->segs, nsegs * sizeof (seg[0])); } static int hifn_crypto( struct hifn_softc *sc, struct hifn_command *cmd, struct cryptop *crp, int hint) { struct hifn_dma *dma = sc->sc_dma; u_int32_t cmdlen, csr; int cmdi, resi, err = 0; /* * need 1 cmd, and 1 res * * NB: check this first since it's easy. */ HIFN_LOCK(sc); if ((sc->sc_cmdu + 1) > HIFN_D_CMD_RSIZE || (sc->sc_resu + 1) > HIFN_D_RES_RSIZE) { #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "cmd/result exhaustion, cmdu %u resu %u\n", sc->sc_cmdu, sc->sc_resu); } #endif hifnstats.hst_nomem_cr++; HIFN_UNLOCK(sc); return (ERESTART); } if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) { hifnstats.hst_nomem_map++; HIFN_UNLOCK(sc); return (ENOMEM); } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map, cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) { hifnstats.hst_nomem_load++; err = ENOMEM; goto err_srcmap1; } } else if (crp->crp_flags & CRYPTO_F_IOV) { if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map, cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) { hifnstats.hst_nomem_load++; err = ENOMEM; goto err_srcmap1; } } else { err = EINVAL; goto err_srcmap1; } if (hifn_dmamap_aligned(&cmd->src)) { cmd->sloplen = cmd->src_mapsize & 3; cmd->dst = cmd->src; } else { if (crp->crp_flags & CRYPTO_F_IOV) { err = EINVAL; goto err_srcmap; } else if (crp->crp_flags & CRYPTO_F_IMBUF) { int totlen, len; struct mbuf *m, *m0, *mlast; KASSERT(cmd->dst_m == cmd->src_m, ("hifn_crypto: dst_m initialized improperly")); hifnstats.hst_unaligned++; /* * Source is not aligned on a longword boundary. * Copy the data to insure alignment. If we fail * to allocate mbufs or clusters while doing this * we return ERESTART so the operation is requeued * at the crypto later, but only if there are * ops already posted to the hardware; otherwise we * have no guarantee that we'll be re-entered. */ totlen = cmd->src_mapsize; if (cmd->src_m->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m0, M_NOWAIT, MT_DATA); if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_NOWAIT)) { m_free(m0); m0 = NULL; } } else { len = MLEN; MGET(m0, M_NOWAIT, MT_DATA); } if (m0 == NULL) { hifnstats.hst_nomem_mbuf++; err = sc->sc_cmdu ? ERESTART : ENOMEM; goto err_srcmap; } if (totlen >= MINCLSIZE) { if (!(MCLGET(m0, M_NOWAIT))) { hifnstats.hst_nomem_mcl++; err = sc->sc_cmdu ? ERESTART : ENOMEM; m_freem(m0); goto err_srcmap; } len = MCLBYTES; } totlen -= len; m0->m_pkthdr.len = m0->m_len = len; mlast = m0; while (totlen > 0) { MGET(m, M_NOWAIT, MT_DATA); if (m == NULL) { hifnstats.hst_nomem_mbuf++; err = sc->sc_cmdu ? ERESTART : ENOMEM; m_freem(m0); goto err_srcmap; } len = MLEN; if (totlen >= MINCLSIZE) { if (!(MCLGET(m, M_NOWAIT))) { hifnstats.hst_nomem_mcl++; err = sc->sc_cmdu ? 
ERESTART : ENOMEM; mlast->m_next = m; m_freem(m0); goto err_srcmap; } len = MCLBYTES; } m->m_len = len; m0->m_pkthdr.len += len; totlen -= len; mlast->m_next = m; mlast = m; } cmd->dst_m = m0; } } if (cmd->dst_map == NULL) { if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) { hifnstats.hst_nomem_map++; err = ENOMEM; goto err_srcmap; } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) { hifnstats.hst_nomem_map++; err = ENOMEM; goto err_dstmap1; } } else if (crp->crp_flags & CRYPTO_F_IOV) { if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map, cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) { hifnstats.hst_nomem_load++; err = ENOMEM; goto err_dstmap1; } } } #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n", READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu, cmd->src_nsegs, cmd->dst_nsegs); } #endif if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_PREREAD); } /* * need N src, and N dst */ if ((sc->sc_srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE || (sc->sc_dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) { #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "src/dst exhaustion, srcu %u+%u dstu %u+%u\n", sc->sc_srcu, cmd->src_nsegs, sc->sc_dstu, cmd->dst_nsegs); } #endif hifnstats.hst_nomem_sd++; err = ERESTART; goto err_dstmap; } if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) { sc->sc_cmdi = 0; dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } cmdi = sc->sc_cmdi++; cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]); HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE); /* .p for command/result already set */ dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); sc->sc_cmdu++; /* * We don't worry about missing an interrupt (which a "command wait" * interrupt salvages us from), unless there is more than one command * in the queue. */ if (sc->sc_cmdu > 1) { sc->sc_dmaier |= HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); } hifnstats.hst_ipackets++; hifnstats.hst_ibytes += cmd->src_mapsize; hifn_dmamap_load_src(sc, cmd); /* * Unlike other descriptors, we don't mask done interrupt from * result descriptor. 
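The comment above describes the default behaviour; the exception is batching, driven by the hint argument the framework passes down. CRYPTO_HINT_MORE means opencrypto expects to submit more requests immediately, so the driver may leave the completion interrupt masked on this result descriptor and let a later request (or the command-wait interrupt) flush the batch. The following is a condensed, illustrative view of the decision made in the result-descriptor setup below, not a copy of the driver code:

	uint32_t l = HIFN_MAX_RESULT | HIFN_D_VALID | HIFN_D_LAST;

	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
		l |= HIFN_D_MASKDONEIRQ;	/* defer the interrupt, grow the batch */
		sc->sc_curbatch++;
	} else
		sc->sc_curbatch = 0;		/* take the interrupt for this request */
	dma->resr[resi].l = htole32(l);
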
*/ #ifdef HIFN_DEBUG if (hifn_debug) printf("load res\n"); #endif if (sc->sc_resi == HIFN_D_RES_RSIZE) { sc->sc_resi = 0; dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } resi = sc->sc_resi++; KASSERT(sc->sc_hifn_commands[resi] == NULL, ("hifn_crypto: command slot %u busy", resi)); sc->sc_hifn_commands[resi] = cmd; HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) { dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); sc->sc_curbatch++; if (sc->sc_curbatch > hifnstats.hst_maxbatch) hifnstats.hst_maxbatch = sc->sc_curbatch; hifnstats.hst_totbatch++; } else { dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_VALID | HIFN_D_LAST); sc->sc_curbatch = 0; } HIFN_RESR_SYNC(sc, resi, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->sc_resu++; if (cmd->sloplen) cmd->slopidx = resi; hifn_dmamap_load_dst(sc, cmd); csr = 0; if (sc->sc_c_busy == 0) { csr |= HIFN_DMACSR_C_CTRL_ENA; sc->sc_c_busy = 1; } if (sc->sc_s_busy == 0) { csr |= HIFN_DMACSR_S_CTRL_ENA; sc->sc_s_busy = 1; } if (sc->sc_r_busy == 0) { csr |= HIFN_DMACSR_R_CTRL_ENA; sc->sc_r_busy = 1; } if (sc->sc_d_busy == 0) { csr |= HIFN_DMACSR_D_CTRL_ENA; sc->sc_d_busy = 1; } if (csr) WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr); #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "command: stat %8x ier %8x\n", READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER)); } #endif sc->sc_active = 5; HIFN_UNLOCK(sc); KASSERT(err == 0, ("hifn_crypto: success with error %u", err)); return (err); /* success */ err_dstmap: if (cmd->src_map != cmd->dst_map) bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); err_dstmap1: if (cmd->src_map != cmd->dst_map) bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); err_srcmap: if (crp->crp_flags & CRYPTO_F_IMBUF) { if (cmd->src_m != cmd->dst_m) m_freem(cmd->dst_m); } bus_dmamap_unload(sc->sc_dmat, cmd->src_map); err_srcmap1: bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); HIFN_UNLOCK(sc); return (err); } static void hifn_tick(void* vsc) { struct hifn_softc *sc = vsc; HIFN_LOCK(sc); if (sc->sc_active == 0) { u_int32_t r = 0; if (sc->sc_cmdu == 0 && sc->sc_c_busy) { sc->sc_c_busy = 0; r |= HIFN_DMACSR_C_CTRL_DIS; } if (sc->sc_srcu == 0 && sc->sc_s_busy) { sc->sc_s_busy = 0; r |= HIFN_DMACSR_S_CTRL_DIS; } if (sc->sc_dstu == 0 && sc->sc_d_busy) { sc->sc_d_busy = 0; r |= HIFN_DMACSR_D_CTRL_DIS; } if (sc->sc_resu == 0 && sc->sc_r_busy) { sc->sc_r_busy = 0; r |= HIFN_DMACSR_R_CTRL_DIS; } if (r) WRITE_REG_1(sc, HIFN_1_DMA_CSR, r); } else sc->sc_active--; HIFN_UNLOCK(sc); callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); } static void hifn_intr(void *arg) { struct hifn_softc *sc = arg; struct hifn_dma *dma; u_int32_t dmacsr, restart; int i, u; dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR); /* Nothing in the DMA unit interrupted */ if ((dmacsr & sc->sc_dmaier) == 0) return; HIFN_LOCK(sc); dma = sc->sc_dma; #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n", dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier, sc->sc_cmdi, sc->sc_srci, sc->sc_dsti, sc->sc_resi, sc->sc_cmdk, sc->sc_srck, sc->sc_dstk, sc->sc_resk, sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu); } #endif WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier); if ((sc->sc_flags & HIFN_HAS_PUBLIC) && (dmacsr & HIFN_DMACSR_PUBDONE)) WRITE_REG_1(sc, 
HIFN_1_PUB_STATUS, READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE); restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER); if (restart) device_printf(sc->sc_dev, "overrun %x\n", dmacsr); if (sc->sc_flags & HIFN_IS_7811) { if (dmacsr & HIFN_DMACSR_ILLR) device_printf(sc->sc_dev, "illegal read\n"); if (dmacsr & HIFN_DMACSR_ILLW) device_printf(sc->sc_dev, "illegal write\n"); } restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); if (restart) { device_printf(sc->sc_dev, "abort, resetting.\n"); hifnstats.hst_abort++; hifn_abort(sc); HIFN_UNLOCK(sc); return; } if ((dmacsr & HIFN_DMACSR_C_WAIT) && (sc->sc_cmdu == 0)) { /* * If no slots to process and we receive a "waiting on * command" interrupt, we disable the "waiting on command" * (by clearing it). */ sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); } /* clear the rings */ i = sc->sc_resk; u = sc->sc_resu; while (u != 0) { HIFN_RESR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->resr[i].l & htole32(HIFN_D_VALID)) { HIFN_RESR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } if (i != HIFN_D_RES_RSIZE) { struct hifn_command *cmd; u_int8_t *macbuf = NULL; HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD); cmd = sc->sc_hifn_commands[i]; KASSERT(cmd != NULL, ("hifn_intr: null command slot %u", i)); sc->sc_hifn_commands[i] = NULL; if (cmd->base_masks & HIFN_BASE_CMD_MAC) { macbuf = dma->result_bufs[i]; macbuf += 12; } hifn_callback(sc, cmd, macbuf); hifnstats.hst_opackets++; u--; } if (++i == (HIFN_D_RES_RSIZE + 1)) i = 0; } sc->sc_resk = i; sc->sc_resu = u; i = sc->sc_srck; u = sc->sc_srcu; while (u != 0) { if (i == HIFN_D_SRC_RSIZE) i = 0; HIFN_SRCR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->srcr[i].l & htole32(HIFN_D_VALID)) { HIFN_SRCR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } i++, u--; } sc->sc_srck = i; sc->sc_srcu = u; i = sc->sc_cmdk; u = sc->sc_cmdu; while (u != 0) { HIFN_CMDR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) { HIFN_CMDR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } if (i != HIFN_D_CMD_RSIZE) { u--; HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE); } if (++i == (HIFN_D_CMD_RSIZE + 1)) i = 0; } sc->sc_cmdk = i; sc->sc_cmdu = u; HIFN_UNLOCK(sc); if (sc->sc_needwakeup) { /* XXX check high watermark */ int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "wakeup crypto (%x) u %d/%d/%d/%d\n", sc->sc_needwakeup, sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu); #endif sc->sc_needwakeup &= ~wakeup; crypto_unblock(sc->sc_cid, wakeup); } } /* * Allocate a new 'session' and return an encoded session id. 'sidp' * contains our registration id, and should contain an encoded session * id on successful allocation. 
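The tail of hifn_intr() above completes the framework's flow-control handshake: whenever hifn_process() has to turn a request away with ERESTART it records CRYPTO_SYMQ in sc_needwakeup, and the interrupt handler calls crypto_unblock() once descriptors have drained so opencrypto resumes dispatching. A minimal sketch of that contract from a generic driver's point of view; the "mydrv" names and mydrv_rings_full() are placeholders, not part of this driver:

	static int
	mydrv_process(device_t dev, struct cryptop *crp, int hint)
	{
		struct mydrv_softc *sc = device_get_softc(dev);

		if (mydrv_rings_full(sc)) {
			/*
			 * ERESTART tells opencrypto to requeue this request and
			 * stop feeding us until crypto_unblock() is called.
			 */
			sc->sc_needwakeup |= CRYPTO_SYMQ;
			return (ERESTART);
		}
		/* ... otherwise post the request to the hardware ... */
		return (0);
	}

	static void
	mydrv_intr(void *arg)
	{
		struct mydrv_softc *sc = arg;

		/* ... reap completed descriptors, crypto_done() each cryptop ... */
		if (sc->sc_needwakeup) {
			int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ | CRYPTO_ASYMQ);

			sc->sc_needwakeup &= ~wakeup;
			crypto_unblock(sc->sc_cid, wakeup);
		}
	}
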
*/ static int -hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri) +hifn_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri) { struct hifn_softc *sc = device_get_softc(dev); struct cryptoini *c; - int mac = 0, cry = 0, sesn; - struct hifn_session *ses = NULL; + int mac = 0, cry = 0; + struct hifn_session *ses; KASSERT(sc != NULL, ("hifn_newsession: null softc")); - if (sidp == NULL || cri == NULL || sc == NULL) + if (cri == NULL || sc == NULL) return (EINVAL); - HIFN_LOCK(sc); - if (sc->sc_sessions == NULL) { - ses = sc->sc_sessions = (struct hifn_session *)malloc( - sizeof(*ses), M_DEVBUF, M_NOWAIT); - if (ses == NULL) { - HIFN_UNLOCK(sc); - return (ENOMEM); - } - sesn = 0; - sc->sc_nsessions = 1; - } else { - for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { - if (!sc->sc_sessions[sesn].hs_used) { - ses = &sc->sc_sessions[sesn]; - break; - } - } + ses = crypto_get_driver_session(cses); - if (ses == NULL) { - sesn = sc->sc_nsessions; - ses = (struct hifn_session *)malloc((sesn + 1) * - sizeof(*ses), M_DEVBUF, M_NOWAIT); - if (ses == NULL) { - HIFN_UNLOCK(sc); - return (ENOMEM); - } - bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses)); - bzero(sc->sc_sessions, sesn * sizeof(*ses)); - free(sc->sc_sessions, M_DEVBUF); - sc->sc_sessions = ses; - ses = &sc->sc_sessions[sesn]; - sc->sc_nsessions++; - } - } - HIFN_UNLOCK(sc); - - bzero(ses, sizeof(*ses)); - ses->hs_used = 1; - for (c = cri; c != NULL; c = c->cri_next) { switch (c->cri_alg) { case CRYPTO_MD5: case CRYPTO_SHA1: case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: if (mac) return (EINVAL); mac = 1; ses->hs_mlen = c->cri_mlen; if (ses->hs_mlen == 0) { switch (c->cri_alg) { case CRYPTO_MD5: case CRYPTO_MD5_HMAC: ses->hs_mlen = 16; break; case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: ses->hs_mlen = 20; break; } } break; case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: case CRYPTO_AES_CBC: /* XXX this may read fewer, does it matter? */ read_random(ses->hs_iv, c->cri_alg == CRYPTO_AES_CBC ? HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); /*FALLTHROUGH*/ case CRYPTO_ARC4: if (cry) return (EINVAL); cry = 1; break; default: return (EINVAL); } } if (mac == 0 && cry == 0) return (EINVAL); - - *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn); - return (0); } /* - * Deallocate a session. - * XXX this routine should run a zero'd mac/encrypt key into context ram. - * XXX to blow away any keys already stored there. + * XXX freesession routine should run a zero'd mac/encrypt key into context + * ram. to blow away any keys already stored there. 
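The hunk above is the heart of the conversion: the driver-private session array, its locking, and the encoded session id all disappear because the framework now allocates one zeroed block of driver-defined session state per session and hands it back through the opaque crypto_session_t. The size of that block is the new second argument to crypto_get_driverid(), as seen later in the safe(4) attach hunk of this patch. A minimal sketch of the pattern with placeholder "mydrv" names, not taken verbatim from either driver:

	struct mydrv_session {
		uint8_t	ms_iv[16];
		int	ms_mlen;
	};

	static int
	mydrv_attach(device_t dev)
	{
		struct mydrv_softc *sc = device_get_softc(dev);

		/* Tell the framework how much per-session state to allocate. */
		sc->sc_cid = crypto_get_driverid(dev, sizeof(struct mydrv_session),
		    CRYPTOCAP_F_HARDWARE);
		return (sc->sc_cid < 0 ? ENXIO : 0);
	}

	static int
	mydrv_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
	{
		struct mydrv_session *ses = crypto_get_driver_session(cses);

		/* ses points at zeroed, framework-owned storage; just fill it in. */
		ses->ms_mlen = cri->cri_mlen;
		return (0);
	}

	static int
	mydrv_process(device_t dev, struct cryptop *crp, int hint)
	{
		struct mydrv_session *ses = crypto_get_driver_session(crp->crp_session);

		/* ... drive the hardware using ses->ms_iv, ses->ms_mlen ... */
		return (0);
	}

Because the handle already identifies both the driver and the session, the HIFN_CARD/HIFN_SESSION/HIFN_SID macros removed later in this patch have nothing left to encode, and a freesession method is only needed when the driver has hardware state of its own to scrub; the XXX comment retained above notes exactly that for hifn's context RAM.
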
*/ -static int -hifn_freesession(device_t dev, u_int64_t tid) -{ - struct hifn_softc *sc = device_get_softc(dev); - int session, error; - u_int32_t sid = CRYPTO_SESID2LID(tid); - KASSERT(sc != NULL, ("hifn_freesession: null softc")); - if (sc == NULL) - return (EINVAL); - - HIFN_LOCK(sc); - session = HIFN_SESSION(sid); - if (session < sc->sc_nsessions) { - bzero(&sc->sc_sessions[session], sizeof(struct hifn_session)); - error = 0; - } else - error = EINVAL; - HIFN_UNLOCK(sc); - - return (error); -} - static int hifn_process(device_t dev, struct cryptop *crp, int hint) { struct hifn_softc *sc = device_get_softc(dev); struct hifn_command *cmd = NULL; - int session, err, ivlen; + int err, ivlen; struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; + struct hifn_session *ses; if (crp == NULL || crp->crp_callback == NULL) { hifnstats.hst_invalid++; return (EINVAL); } - session = HIFN_SESSION(crp->crp_sid); - if (sc == NULL || session >= sc->sc_nsessions) { - err = EINVAL; - goto errout; - } - + ses = crypto_get_driver_session(crp->crp_session); cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO); if (cmd == NULL) { hifnstats.hst_nomem++; err = ENOMEM; goto errout; } if (crp->crp_flags & CRYPTO_F_IMBUF) { cmd->src_m = (struct mbuf *)crp->crp_buf; cmd->dst_m = (struct mbuf *)crp->crp_buf; } else if (crp->crp_flags & CRYPTO_F_IOV) { cmd->src_io = (struct uio *)crp->crp_buf; cmd->dst_io = (struct uio *)crp->crp_buf; } else { err = EINVAL; goto errout; /* XXX we don't handle contiguous buffers! */ } crd1 = crp->crp_desc; if (crd1 == NULL) { err = EINVAL; goto errout; } crd2 = crd1->crd_next; if (crd2 == NULL) { if (crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_SHA1 || crd1->crd_alg == CRYPTO_MD5) { maccrd = crd1; enccrd = NULL; } else if (crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC || crd1->crd_alg == CRYPTO_ARC4) { if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0) cmd->base_masks |= HIFN_BASE_CMD_DECODE; maccrd = NULL; enccrd = crd1; } else { err = EINVAL; goto errout; } } else { if ((crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_MD5 || crd1->crd_alg == CRYPTO_SHA1) && (crd2->crd_alg == CRYPTO_DES_CBC || crd2->crd_alg == CRYPTO_3DES_CBC || crd2->crd_alg == CRYPTO_AES_CBC || crd2->crd_alg == CRYPTO_ARC4) && ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { cmd->base_masks = HIFN_BASE_CMD_DECODE; maccrd = crd1; enccrd = crd2; } else if ((crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_ARC4 || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC) && (crd2->crd_alg == CRYPTO_MD5_HMAC || crd2->crd_alg == CRYPTO_SHA1_HMAC || crd2->crd_alg == CRYPTO_MD5 || crd2->crd_alg == CRYPTO_SHA1) && (crd1->crd_flags & CRD_F_ENCRYPT)) { enccrd = crd1; maccrd = crd2; } else { /* * We cannot order the 7751 as requested */ err = EINVAL; goto errout; } } if (enccrd) { cmd->enccrd = enccrd; cmd->base_masks |= HIFN_BASE_CMD_CRYPT; switch (enccrd->crd_alg) { case CRYPTO_ARC4: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4; break; case CRYPTO_DES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; case CRYPTO_3DES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; case CRYPTO_AES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; default: err = EINVAL; goto errout; } if (enccrd->crd_alg 
!= CRYPTO_ARC4) { ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ? HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); if (enccrd->crd_flags & CRD_F_ENCRYPT) { if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, cmd->iv, ivlen); else - bcopy(sc->sc_sessions[session].hs_iv, - cmd->iv, ivlen); + bcopy(ses->hs_iv, cmd->iv, ivlen); if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivlen, cmd->iv); } } else { if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, cmd->iv, ivlen); else { crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivlen, cmd->iv); } } } if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; cmd->ck = enccrd->crd_key; cmd->cklen = enccrd->crd_klen >> 3; cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; /* * Need to specify the size for the AES key in the masks. */ if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) == HIFN_CRYPT_CMD_ALG_AES) { switch (cmd->cklen) { case 16: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128; break; case 24: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192; break; case 32: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256; break; default: err = EINVAL; goto errout; } } } if (maccrd) { cmd->maccrd = maccrd; cmd->base_masks |= HIFN_BASE_CMD_MAC; switch (maccrd->crd_alg) { case CRYPTO_MD5: cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | HIFN_MAC_CMD_POS_IPSEC; break; case CRYPTO_MD5_HMAC: cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; break; case CRYPTO_SHA1: cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | HIFN_MAC_CMD_POS_IPSEC; break; case CRYPTO_SHA1_HMAC: cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; break; } if (maccrd->crd_alg == CRYPTO_SHA1_HMAC || maccrd->crd_alg == CRYPTO_MD5_HMAC) { cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY; bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3); bzero(cmd->mac + (maccrd->crd_klen >> 3), HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3)); } } cmd->crp = crp; - cmd->session_num = session; + cmd->session = ses; cmd->softc = sc; err = hifn_crypto(sc, cmd, crp, hint); if (!err) { return 0; } else if (err == ERESTART) { /* * There weren't enough resources to dispatch the request * to the part. Notify the caller so they'll requeue this * request and resubmit it again soon. */ #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "requeue request\n"); #endif free(cmd, M_DEVBUF); sc->sc_needwakeup |= CRYPTO_SYMQ; return (err); } errout: if (cmd != NULL) free(cmd, M_DEVBUF); if (err == EINVAL) hifnstats.hst_invalid++; else hifnstats.hst_nomem++; crp->crp_etype = err; crypto_done(crp); return (err); } static void hifn_abort(struct hifn_softc *sc) { struct hifn_dma *dma = sc->sc_dma; struct hifn_command *cmd; struct cryptop *crp; int i, u; i = sc->sc_resk; u = sc->sc_resu; while (u != 0) { cmd = sc->sc_hifn_commands[i]; KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i)); sc->sc_hifn_commands[i] = NULL; crp = cmd->crp; if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) { /* Salvage what we can. 
*/ u_int8_t *macbuf; if (cmd->base_masks & HIFN_BASE_CMD_MAC) { macbuf = dma->result_bufs[i]; macbuf += 12; } else macbuf = NULL; hifnstats.hst_opackets++; hifn_callback(sc, cmd, macbuf); } else { if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_POSTREAD); } if (cmd->src_m != cmd->dst_m) { m_freem(cmd->src_m); crp->crp_buf = (caddr_t)cmd->dst_m; } /* non-shared buffers cannot be restarted */ if (cmd->src_map != cmd->dst_map) { /* * XXX should be EAGAIN, delayed until * after the reset. */ crp->crp_etype = ENOMEM; bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); } else crp->crp_etype = ENOMEM; bus_dmamap_unload(sc->sc_dmat, cmd->src_map); bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); free(cmd, M_DEVBUF); if (crp->crp_etype != EAGAIN) crypto_done(crp); } if (++i == HIFN_D_RES_RSIZE) i = 0; u--; } sc->sc_resk = i; sc->sc_resu = u; hifn_reset_board(sc, 1); hifn_init_dma(sc); hifn_init_pci_registers(sc); } static void hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf) { struct hifn_dma *dma = sc->sc_dma; struct cryptop *crp = cmd->crp; struct cryptodesc *crd; struct mbuf *m; int totlen, i, u, ivlen; if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_POSTREAD); } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (cmd->src_m != cmd->dst_m) { crp->crp_buf = (caddr_t)cmd->dst_m; totlen = cmd->src_mapsize; for (m = cmd->dst_m; m != NULL; m = m->m_next) { if (totlen < m->m_len) { m->m_len = totlen; totlen = 0; } else totlen -= m->m_len; } cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len; m_freem(cmd->src_m); } } if (cmd->sloplen != 0) { crypto_copyback(crp->crp_flags, crp->crp_buf, cmd->src_mapsize - cmd->sloplen, cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]); } i = sc->sc_dstk; u = sc->sc_dstu; while (u != 0) { if (i == HIFN_D_DST_RSIZE) i = 0; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } i++, u--; } sc->sc_dstk = i; sc->sc_dstu = u; hifnstats.hst_obytes += cmd->dst_mapsize; if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) == HIFN_BASE_CMD_CRYPT) { for (crd = crp->crp_desc; crd; crd = crd->crd_next) { if (crd->crd_alg != CRYPTO_DES_CBC && crd->crd_alg != CRYPTO_3DES_CBC && crd->crd_alg != CRYPTO_AES_CBC) continue; ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ? 
HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip + crd->crd_len - ivlen, ivlen, - cmd->softc->sc_sessions[cmd->session_num].hs_iv); + cmd->session->hs_iv); break; } } if (macbuf != NULL) { for (crd = crp->crp_desc; crd; crd = crd->crd_next) { int len; if (crd->crd_alg != CRYPTO_MD5 && crd->crd_alg != CRYPTO_SHA1 && crd->crd_alg != CRYPTO_MD5_HMAC && crd->crd_alg != CRYPTO_SHA1_HMAC) { continue; } - len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen; + len = cmd->session->hs_mlen; crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, len, macbuf); break; } } if (cmd->src_map != cmd->dst_map) { bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); } bus_dmamap_unload(sc->sc_dmat, cmd->src_map); bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); free(cmd, M_DEVBUF); crypto_done(crp); } /* * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0 * and Group 1 registers; avoid conditions that could create * burst writes by doing a read in between the writes. * * NB: The read we interpose is always to the same register; * we do this because reading from an arbitrary (e.g. last) * register may not always work. */ static void hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) { if (sc->sc_flags & HIFN_IS_7811) { if (sc->sc_bar0_lastreg == reg - 4) bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG); sc->sc_bar0_lastreg = reg; } bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); } static void hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) { if (sc->sc_flags & HIFN_IS_7811) { if (sc->sc_bar1_lastreg == reg - 4) bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); sc->sc_bar1_lastreg = reg; } bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); } #ifdef HIFN_VULCANDEV /* * this code provides support for mapping the PK engine's register * into a userspace program. * */ static int vulcanpk_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr) { struct hifn_softc *sc; vm_paddr_t pd; void *b; sc = dev->si_drv1; pd = rman_get_start(sc->sc_bar1res); b = rman_get_virtual(sc->sc_bar1res); #if 0 printf("vpk mmap: %p(%016llx) offset=%lld\n", b, (unsigned long long)pd, offset); hexdump(b, HIFN_1_PUB_MEMEND, "vpk", 0); #endif if (offset == 0) { *paddr = pd; return (0); } return (-1); } static struct cdevsw vulcanpk_cdevsw = { .d_version = D_VERSION, .d_mmap = vulcanpk_mmap, .d_name = "vulcanpk", }; #endif /* HIFN_VULCANDEV */ Index: head/sys/dev/hifn/hifn7751var.h =================================================================== --- head/sys/dev/hifn/hifn7751var.h (revision 336438) +++ head/sys/dev/hifn/hifn7751var.h (revision 336439) @@ -1,365 +1,354 @@ /* $FreeBSD$ */ /* $OpenBSD: hifn7751var.h,v 1.42 2002/04/08 17:49:42 jason Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Invertex AEON / Hifn 7751 driver * Copyright (c) 1999 Invertex Inc. All rights reserved. * Copyright (c) 1999 Theo de Raadt * Copyright (c) 2000-2001 Network Security Technologies, Inc. * http://www.netsec.net * * Please send any comments, feedback, bug-fixes, or feature requests to * software@invertex.com. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. * */ #ifndef __HIFN7751VAR_H__ #define __HIFN7751VAR_H__ #ifdef _KERNEL /* * Some configurable values for the driver. By default command+result * descriptor rings are the same size. The src+dst descriptor rings * are sized at 3.5x the number of potential commands. Slower parts * (e.g. 7951) tend to run out of src descriptors; faster parts (7811) * src+cmd/result descriptors. It's not clear that increasing the size * of the descriptor rings helps performance significantly as other * factors tend to come into play (e.g. copying misaligned packets). */ #define HIFN_D_CMD_RSIZE 24 /* command descriptors */ #define HIFN_D_SRC_RSIZE ((HIFN_D_CMD_RSIZE * 7) / 2) /* source descriptors */ #define HIFN_D_RES_RSIZE HIFN_D_CMD_RSIZE /* result descriptors */ #define HIFN_D_DST_RSIZE HIFN_D_SRC_RSIZE /* destination descriptors */ /* * Length values for cryptography */ #define HIFN_DES_KEY_LENGTH 8 #define HIFN_3DES_KEY_LENGTH 24 #define HIFN_MAX_CRYPT_KEY_LENGTH HIFN_3DES_KEY_LENGTH #define HIFN_IV_LENGTH 8 #define HIFN_AES_IV_LENGTH 16 #define HIFN_MAX_IV_LENGTH HIFN_AES_IV_LENGTH /* * Length values for authentication */ #define HIFN_MAC_KEY_LENGTH 64 #define HIFN_MD5_LENGTH 16 #define HIFN_SHA1_LENGTH 20 #define HIFN_MAC_TRUNC_LENGTH 12 #define MAX_SCATTER 64 /* * Data structure to hold all 4 rings and any other ring related data * that should reside in DMA. */ struct hifn_dma { /* * Descriptor rings. We add +1 to the size to accomidate the * jump descriptor. 
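A worked example of the sizing described above: with the default HIFN_D_CMD_RSIZE of 24, the derived macros give 84 source and destination descriptors and 24 result descriptors, and struct hifn_dma (whose members follow) reserves one extra slot per ring for the jump descriptor that wraps each ring back to its start:

	/*
	 * HIFN_D_CMD_RSIZE = 24
	 * HIFN_D_SRC_RSIZE = (24 * 7) / 2 = 84   (the "3.5x" mentioned above)
	 * HIFN_D_RES_RSIZE = 24
	 * HIFN_D_DST_RSIZE = 84
	 *
	 * so cmdr[] and resr[] are declared with 24 + 1 entries and
	 * srcr[]/dstr[] with 84 + 1; the +1 entry holds the jump descriptor.
	 */
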
*/ struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1]; struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1]; struct hifn_desc dstr[HIFN_D_DST_RSIZE+1]; struct hifn_desc resr[HIFN_D_RES_RSIZE+1]; u_char command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND]; u_char result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT]; u_int32_t slop[HIFN_D_CMD_RSIZE]; u_int64_t test_src, test_dst; } ; struct hifn_session { - int hs_used; - int hs_mlen; u_int8_t hs_iv[HIFN_MAX_IV_LENGTH]; + int hs_mlen; }; #define HIFN_RING_SYNC(sc, r, i, f) \ bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) #define HIFN_CMDR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), cmdr, (i), (f)) #define HIFN_RESR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), resr, (i), (f)) #define HIFN_SRCR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), srcr, (i), (f)) #define HIFN_DSTR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), dstr, (i), (f)) #define HIFN_CMD_SYNC(sc, i, f) \ bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) #define HIFN_RES_SYNC(sc, i, f) \ bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) /* * Holds data specific to a single HIFN board. */ struct hifn_softc { device_t sc_dev; /* device backpointer */ struct mtx sc_mtx; /* per-instance lock */ bus_dma_tag_t sc_dmat; /* parent DMA tag descriptor */ struct resource *sc_bar0res; bus_space_handle_t sc_sh0; /* bar0 bus space handle */ bus_space_tag_t sc_st0; /* bar0 bus space tag */ bus_size_t sc_bar0_lastreg;/* bar0 last reg written */ struct resource *sc_bar1res; bus_space_handle_t sc_sh1; /* bar1 bus space handle */ bus_space_tag_t sc_st1; /* bar1 bus space tag */ bus_size_t sc_bar1_lastreg;/* bar1 last reg written */ struct resource *sc_irq; void *sc_intrhand; /* interrupt handle */ u_int32_t sc_dmaier; u_int32_t sc_drammodel; /* 1=dram, 0=sram */ u_int32_t sc_pllconfig; /* 7954/7955/7956 PLL config */ struct hifn_dma *sc_dma; bus_dmamap_t sc_dmamap; bus_dma_segment_t sc_dmasegs[1]; bus_addr_t sc_dma_physaddr;/* physical address of sc_dma */ int sc_dmansegs; struct hifn_command *sc_hifn_commands[HIFN_D_RES_RSIZE]; /* * Our current positions for insertion and removal from the desriptor * rings. */ int sc_cmdi, sc_srci, sc_dsti, sc_resi; volatile int sc_cmdu, sc_srcu, sc_dstu, sc_resu; int sc_cmdk, sc_srck, sc_dstk, sc_resk; int32_t sc_cid; int sc_maxses; - int sc_nsessions; - struct hifn_session *sc_sessions; int sc_ramsize; int sc_flags; #define HIFN_HAS_RNG 0x1 /* includes random number generator */ #define HIFN_HAS_PUBLIC 0x2 /* includes public key support */ #define HIFN_HAS_AES 0x4 /* includes AES support */ #define HIFN_IS_7811 0x8 /* Hifn 7811 part */ #define HIFN_IS_7956 0x10 /* Hifn 7956/7955 don't have SDRAM */ struct callout sc_rngto; /* for polling RNG */ struct callout sc_tickto; /* for managing DMA */ int sc_rngfirst; int sc_rnghz; /* RNG polling frequency */ struct rndtest_state *sc_rndtest; /* RNG test state */ void (*sc_harvest)(struct rndtest_state *, void *, u_int); int sc_c_busy; /* command ring busy */ int sc_s_busy; /* source data ring busy */ int sc_d_busy; /* destination data ring busy */ int sc_r_busy; /* result ring busy */ int sc_active; /* for initial countdown */ int sc_needwakeup; /* ops q'd wating on resources */ int sc_curbatch; /* # ops submitted w/o int */ int sc_suspended; #ifdef HIFN_VULCANDEV struct cdev *sc_pkdev; #endif }; #define HIFN_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define HIFN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) /* * hifn_command_t * * This is the control structure used to pass commands to hifn_encrypt(). * * flags * ----- * Flags is the bitwise "or" values for command configuration. 
A single * encrypt direction needs to be set: * * HIFN_ENCODE or HIFN_DECODE * * To use cryptography, a single crypto algorithm must be included: * * HIFN_CRYPT_3DES or HIFN_CRYPT_DES * * To use authentication is used, a single MAC algorithm must be included: * * HIFN_MAC_MD5 or HIFN_MAC_SHA1 * * By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash. * If the value below is set, hash values are truncated or assumed * truncated to 12 bytes: * * HIFN_MAC_TRUNC * * Keys for encryption and authentication can be sent as part of a command, * or the last key value used with a particular session can be retrieved * and used again if either of these flags are not specified. * * HIFN_CRYPT_NEW_KEY, HIFN_MAC_NEW_KEY * * session_num * ----------- * A number between 0 and 2048 (for DRAM models) or a number between * 0 and 768 (for SRAM models). Those who don't want to use session * numbers should leave value at zero and send a new crypt key and/or * new MAC key on every command. If you use session numbers and * don't send a key with a command, the last key sent for that same * session number will be used. * * Warning: Using session numbers and multiboard at the same time * is currently broken. * * mbuf * ---- * Either fill in the mbuf pointer and npa=0 or * fill packp[] and packl[] and set npa to > 0 * * mac_header_skip * --------------- * The number of bytes of the source_buf that are skipped over before * authentication begins. This must be a number between 0 and 2^16-1 * and can be used by IPsec implementers to skip over IP headers. * *** Value ignored if authentication not used *** * * crypt_header_skip * ----------------- * The number of bytes of the source_buf that are skipped over before * the cryptographic operation begins. This must be a number between 0 * and 2^16-1. For IPsec, this number will always be 8 bytes larger * than the auth_header_skip (to skip over the ESP header). * *** Value ignored if cryptography not used *** * */ struct hifn_operand { union { struct mbuf *m; struct uio *io; } u; bus_dmamap_t map; bus_size_t mapsize; int nsegs; bus_dma_segment_t segs[MAX_SCATTER]; }; struct hifn_command { - u_int16_t session_num; + struct hifn_session *session; u_int16_t base_masks, cry_masks, mac_masks; u_int8_t iv[HIFN_MAX_IV_LENGTH], *ck, mac[HIFN_MAC_KEY_LENGTH]; int cklen; int sloplen, slopidx; struct hifn_operand src; struct hifn_operand dst; struct hifn_softc *softc; struct cryptop *crp; struct cryptodesc *enccrd, *maccrd; }; #define src_m src.u.m #define src_io src.u.io #define src_map src.map #define src_mapsize src.mapsize #define src_segs src.segs #define src_nsegs src.nsegs #define dst_m dst.u.m #define dst_io dst.u.io #define dst_map dst.map #define dst_mapsize dst.mapsize #define dst_segs dst.segs #define dst_nsegs dst.nsegs /* * Return values for hifn_crypto() */ #define HIFN_CRYPTO_SUCCESS 0 #define HIFN_CRYPTO_BAD_INPUT (-1) #define HIFN_CRYPTO_RINGS_FULL (-2) /************************************************************************** * * Function: hifn_crypto * * Purpose: Called by external drivers to begin an encryption on the * HIFN board. * * Blocking/Non-blocking Issues * ============================ * The driver cannot block in hifn_crypto (no calls to tsleep) currently. * hifn_crypto() returns HIFN_CRYPTO_RINGS_FULL if there is not enough * room in any of the rings for the request to proceed. 
* * Return Values * ============= * 0 for success, negative values on error * * Defines for negative error codes are: * * HIFN_CRYPTO_BAD_INPUT : The passed in command had invalid settings. * HIFN_CRYPTO_RINGS_FULL : All DMA rings were full and non-blocking * behaviour was requested. * *************************************************************************/ - -/* - * Convert back and forth from 'sid' to 'card' and 'session' - */ -#define HIFN_CARD(sid) (((sid) & 0xf0000000) >> 28) -#define HIFN_SESSION(sid) ((sid) & 0x000007ff) -#define HIFN_SID(crd,ses) (((crd) << 28) | ((ses) & 0x7ff)) - #endif /* _KERNEL */ struct hifn_stats { u_int64_t hst_ibytes; u_int64_t hst_obytes; u_int32_t hst_ipackets; u_int32_t hst_opackets; u_int32_t hst_invalid; u_int32_t hst_nomem; /* malloc or one of hst_nomem_* */ u_int32_t hst_abort; u_int32_t hst_noirq; /* IRQ for no reason */ u_int32_t hst_totbatch; /* ops submitted w/o interrupt */ u_int32_t hst_maxbatch; /* max ops submitted together */ u_int32_t hst_unaligned; /* unaligned src caused copy */ /* * The following divides hst_nomem into more specific buckets. */ u_int32_t hst_nomem_map; /* bus_dmamap_create failed */ u_int32_t hst_nomem_load; /* bus_dmamap_load_* failed */ u_int32_t hst_nomem_mbuf; /* MGET* failed */ u_int32_t hst_nomem_mcl; /* MCLGET* failed */ u_int32_t hst_nomem_cr; /* out of command/result descriptor */ u_int32_t hst_nomem_sd; /* out of src/dst descriptors */ }; #endif /* __HIFN7751VAR_H__ */ Index: head/sys/dev/safe/safe.c =================================================================== --- head/sys/dev/safe/safe.c (revision 336438) +++ head/sys/dev/safe/safe.c (revision 336439) @@ -1,2229 +1,2167 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Sam Leffler, Errno Consulting * Copyright (c) 2003 Global Technology Associates, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * SafeNet SafeXcel-1141 hardware crypto accelerator */ #include "opt_safe.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #ifdef SAFE_RNDTEST #include #endif #include #include #ifndef bswap32 #define bswap32 NTOHL #endif /* * Prototypes and count for the pci_device structure */ static int safe_probe(device_t); static int safe_attach(device_t); static int safe_detach(device_t); static int safe_suspend(device_t); static int safe_resume(device_t); static int safe_shutdown(device_t); -static int safe_newsession(device_t, u_int32_t *, struct cryptoini *); -static int safe_freesession(device_t, u_int64_t); +static int safe_newsession(device_t, crypto_session_t, struct cryptoini *); static int safe_process(device_t, struct cryptop *, int); static device_method_t safe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, safe_probe), DEVMETHOD(device_attach, safe_attach), DEVMETHOD(device_detach, safe_detach), DEVMETHOD(device_suspend, safe_suspend), DEVMETHOD(device_resume, safe_resume), DEVMETHOD(device_shutdown, safe_shutdown), /* crypto device methods */ DEVMETHOD(cryptodev_newsession, safe_newsession), - DEVMETHOD(cryptodev_freesession,safe_freesession), DEVMETHOD(cryptodev_process, safe_process), DEVMETHOD_END }; static driver_t safe_driver = { "safe", safe_methods, sizeof (struct safe_softc) }; static devclass_t safe_devclass; DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0); MODULE_DEPEND(safe, crypto, 1, 1, 1); #ifdef SAFE_RNDTEST MODULE_DEPEND(safe, rndtest, 1, 1, 1); #endif static void safe_intr(void *); static void safe_callback(struct safe_softc *, struct safe_ringentry *); static void safe_feed(struct safe_softc *, struct safe_ringentry *); static void safe_mcopy(struct mbuf *, struct mbuf *, u_int); #ifndef SAFE_NO_RNG static void safe_rng_init(struct safe_softc *); static void safe_rng(void *); #endif /* SAFE_NO_RNG */ static int safe_dma_malloc(struct safe_softc *, bus_size_t, struct safe_dma_alloc *, int); #define safe_dma_sync(_dma, _flags) \ bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags)) static void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *); static int safe_dmamap_aligned(const struct safe_operand *); static int safe_dmamap_uniform(const struct safe_operand *); static void safe_reset_board(struct safe_softc *); static void safe_init_board(struct safe_softc *); static void safe_init_pciregs(device_t dev); static void safe_cleanchip(struct safe_softc *); static void safe_totalreset(struct safe_softc *); static int safe_free_entry(struct safe_softc *, struct safe_ringentry *); static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD, 0, "SafeNet driver parameters"); #ifdef SAFE_DEBUG static void safe_dump_dmastatus(struct safe_softc *, const char *); static void safe_dump_ringstate(struct safe_softc *, const char *); static void safe_dump_intrstate(struct safe_softc *, const char *); static void safe_dump_request(struct safe_softc *, const char *, struct safe_ringentry *); static struct safe_softc *safec; /* for use by hw.safe.dump */ static int safe_debug = 0; SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug, 0, "control debugging msgs"); #define DPRINTF(_x) if (safe_debug) printf _x #else #define DPRINTF(_x) #endif #define READ_REG(sc,r) \ bus_space_read_4((sc)->sc_st, 
(sc)->sc_sh, (r)) #define WRITE_REG(sc,reg,val) \ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val) struct safe_stats safestats; SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats, safe_stats, "driver statistics"); #ifndef SAFE_NO_RNG static int safe_rnginterval = 1; /* poll once a second */ SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval, 0, "RNG polling interval (secs)"); static int safe_rngbufsize = 16; /* 64 bytes each poll */ SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize, 0, "RNG polling buffer size (32-bit words)"); static int safe_rngmaxalarm = 8; /* max alarms before reset */ SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm, 0, "RNG max alarms before reset"); #endif /* SAFE_NO_RNG */ static int safe_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET && pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL) return (BUS_PROBE_DEFAULT); return (ENXIO); } static const char* safe_partname(struct safe_softc *sc) { /* XXX sprintf numbers when not decoded */ switch (pci_get_vendor(sc->sc_dev)) { case PCI_VENDOR_SAFENET: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141"; } return "SafeNet unknown-part"; } return "Unknown-vendor unknown-part"; } #ifndef SAFE_NO_RNG static void default_harvest(struct rndtest_state *rsp, void *buf, u_int count) { /* MarkM: FIX!! Check that this does not swamp the harvester! */ random_harvest_queue(buf, count, count*NBBY/2, RANDOM_PURE_SAFE); } #endif /* SAFE_NO_RNG */ static int safe_attach(device_t dev) { struct safe_softc *sc = device_get_softc(dev); u_int32_t raddr; u_int32_t i, devinfo; int rid; bzero(sc, sizeof (*sc)); sc->sc_dev = dev; /* XXX handle power management */ pci_enable_busmaster(dev); /* * Setup memory-mapping of PCI registers. */ rid = BS_BAR; sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_sr == NULL) { device_printf(dev, "cannot map register space\n"); goto bad; } sc->sc_st = rman_get_bustag(sc->sc_sr); sc->sc_sh = rman_get_bushandle(sc->sc_sr); /* * Arrange interrupt line. */ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE|RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not map interrupt\n"); goto bad1; } /* * NB: Network code assumes we are blocked with splimp() * so make sure the IRQ is mapped appropriately. */ if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, safe_intr, sc, &sc->sc_ih)) { device_printf(dev, "could not establish interrupt\n"); goto bad2; } - sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); + sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safe_session), + CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto bad3; } sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) & (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN); /* * Setup DMA descriptor area. 
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, /* alignment */ SAFE_DMA_BOUNDARY, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SAFE_MAX_DMA, /* maxsize */ SAFE_MAX_PART, /* nsegments */ SAFE_MAX_SSIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* locking */ &sc->sc_srcdmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto bad4; } if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, /* alignment */ SAFE_MAX_DSIZE, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SAFE_MAX_DMA, /* maxsize */ SAFE_MAX_PART, /* nsegments */ SAFE_MAX_DSIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* locking */ &sc->sc_dstdmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto bad4; } /* * Allocate packet engine descriptors. */ if (safe_dma_malloc(sc, SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry), &sc->sc_ringalloc, 0)) { device_printf(dev, "cannot allocate PE descriptor ring\n"); bus_dma_tag_destroy(sc->sc_srcdmat); goto bad4; } /* * Hookup the static portion of all our data structures. */ sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr; sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE; sc->sc_front = sc->sc_ring; sc->sc_back = sc->sc_ring; raddr = sc->sc_ringalloc.dma_paddr; bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry)); for (i = 0; i < SAFE_MAX_NQUEUE; i++) { struct safe_ringentry *re = &sc->sc_ring[i]; re->re_desc.d_sa = raddr + offsetof(struct safe_ringentry, re_sa); re->re_sa.sa_staterec = raddr + offsetof(struct safe_ringentry, re_sastate); raddr += sizeof (struct safe_ringentry); } mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev), "packet engine ring", MTX_DEF); /* * Allocate scatter and gather particle descriptors. 
*/ if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc), &sc->sc_spalloc, 0)) { device_printf(dev, "cannot allocate source particle " "descriptor ring\n"); mtx_destroy(&sc->sc_ringmtx); safe_dma_free(sc, &sc->sc_ringalloc); bus_dma_tag_destroy(sc->sc_srcdmat); goto bad4; } sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr; sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART; sc->sc_spfree = sc->sc_spring; bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc)); if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc), &sc->sc_dpalloc, 0)) { device_printf(dev, "cannot allocate destination particle " "descriptor ring\n"); mtx_destroy(&sc->sc_ringmtx); safe_dma_free(sc, &sc->sc_spalloc); safe_dma_free(sc, &sc->sc_ringalloc); bus_dma_tag_destroy(sc->sc_dstdmat); goto bad4; } sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr; sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART; sc->sc_dpfree = sc->sc_dpring; bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc)); device_printf(sc->sc_dev, "%s", safe_partname(sc)); devinfo = READ_REG(sc, SAFE_DEVINFO); if (devinfo & SAFE_DEVINFO_RNG) { sc->sc_flags |= SAFE_FLAGS_RNG; printf(" rng"); } if (devinfo & SAFE_DEVINFO_PKEY) { #if 0 printf(" key"); sc->sc_flags |= SAFE_FLAGS_KEY; crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0); crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0); #endif } if (devinfo & SAFE_DEVINFO_DES) { printf(" des/3des"); crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); } if (devinfo & SAFE_DEVINFO_AES) { printf(" aes"); crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); } if (devinfo & SAFE_DEVINFO_MD5) { printf(" md5"); crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); } if (devinfo & SAFE_DEVINFO_SHA1) { printf(" sha1"); crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); } printf(" null"); crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0); /* XXX other supported algorithms */ printf("\n"); safe_reset_board(sc); /* reset h/w */ safe_init_pciregs(dev); /* init pci settings */ safe_init_board(sc); /* init h/w */ #ifndef SAFE_NO_RNG if (sc->sc_flags & SAFE_FLAGS_RNG) { #ifdef SAFE_RNDTEST sc->sc_rndtest = rndtest_attach(dev); if (sc->sc_rndtest) sc->sc_harvest = rndtest_harvest; else sc->sc_harvest = default_harvest; #else sc->sc_harvest = default_harvest; #endif safe_rng_init(sc); callout_init(&sc->sc_rngto, 1); callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc); } #endif /* SAFE_NO_RNG */ #ifdef SAFE_DEBUG safec = sc; /* for use by hw.safe.dump */ #endif return (0); bad4: crypto_unregister_all(sc->sc_cid); bad3: bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bad2: bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bad1: bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); bad: return (ENXIO); } /* * Detach a device that successfully probed. 
*/ static int safe_detach(device_t dev) { struct safe_softc *sc = device_get_softc(dev); /* XXX wait/abort active ops */ WRITE_REG(sc, SAFE_HI_MASK, 0); /* disable interrupts */ callout_stop(&sc->sc_rngto); crypto_unregister_all(sc->sc_cid); #ifdef SAFE_RNDTEST if (sc->sc_rndtest) rndtest_detach(sc->sc_rndtest); #endif safe_cleanchip(sc); safe_dma_free(sc, &sc->sc_dpalloc); safe_dma_free(sc, &sc->sc_spalloc); mtx_destroy(&sc->sc_ringmtx); safe_dma_free(sc, &sc->sc_ringalloc); bus_generic_detach(dev); bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bus_dma_tag_destroy(sc->sc_srcdmat); bus_dma_tag_destroy(sc->sc_dstdmat); bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); return (0); } /* * Stop all chip i/o so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int safe_shutdown(device_t dev) { #ifdef notyet safe_stop(device_get_softc(dev)); #endif return (0); } /* * Device suspend routine. */ static int safe_suspend(device_t dev) { struct safe_softc *sc = device_get_softc(dev); #ifdef notyet /* XXX stop the device and save PCI settings */ #endif sc->sc_suspended = 1; return (0); } static int safe_resume(device_t dev) { struct safe_softc *sc = device_get_softc(dev); #ifdef notyet /* XXX retore PCI settings and start the device */ #endif sc->sc_suspended = 0; return (0); } /* * SafeXcel Interrupt routine */ static void safe_intr(void *arg) { struct safe_softc *sc = arg; volatile u_int32_t stat; stat = READ_REG(sc, SAFE_HM_STAT); if (stat == 0) /* shared irq, not for us */ return; WRITE_REG(sc, SAFE_HI_CLR, stat); /* IACK */ if ((stat & SAFE_INT_PE_DDONE)) { /* * Descriptor(s) done; scan the ring and * process completed operations. */ mtx_lock(&sc->sc_ringmtx); while (sc->sc_back != sc->sc_front) { struct safe_ringentry *re = sc->sc_back; #ifdef SAFE_DEBUG if (safe_debug) { safe_dump_ringstate(sc, __func__); safe_dump_request(sc, __func__, re); } #endif /* * safe_process marks ring entries that were allocated * but not used with a csr of zero. This insures the * ring front pointer never needs to be set backwards * in the event that an entry is allocated but not used * because of a setup error. 
*/ if (re->re_desc.d_csr != 0) { if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) break; if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) break; sc->sc_nqchip--; safe_callback(sc, re); } if (++(sc->sc_back) == sc->sc_ringtop) sc->sc_back = sc->sc_ring; } mtx_unlock(&sc->sc_ringmtx); } /* * Check to see if we got any DMA Error */ if (stat & SAFE_INT_PE_ERROR) { DPRINTF(("dmaerr dmastat %08x\n", READ_REG(sc, SAFE_PE_DMASTAT))); safestats.st_dmaerr++; safe_totalreset(sc); #if 0 safe_feed(sc); #endif } if (sc->sc_needwakeup) { /* XXX check high watermark */ int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); DPRINTF(("%s: wakeup crypto %x\n", __func__, sc->sc_needwakeup)); sc->sc_needwakeup &= ~wakeup; crypto_unblock(sc->sc_cid, wakeup); } } /* * safe_feed() - post a request to chip */ static void safe_feed(struct safe_softc *sc, struct safe_ringentry *re) { bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE); if (re->re_dst_map != NULL) bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map, BUS_DMASYNC_PREREAD); /* XXX have no smaller granularity */ safe_dma_sync(&sc->sc_ringalloc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE); safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE); #ifdef SAFE_DEBUG if (safe_debug) { safe_dump_ringstate(sc, __func__); safe_dump_request(sc, __func__, re); } #endif sc->sc_nqchip++; if (sc->sc_nqchip > safestats.st_maxqchip) safestats.st_maxqchip = sc->sc_nqchip; /* poke h/w to check descriptor ring, any value can be written */ WRITE_REG(sc, SAFE_HI_RD_DESCR, 0); } #define N(a) (sizeof(a) / sizeof (a[0])) static void safe_setup_enckey(struct safe_session *ses, caddr_t key) { int i; bcopy(key, ses->ses_key, ses->ses_klen / 8); /* PE is little-endian, insure proper byte order */ for (i = 0; i < N(ses->ses_key); i++) ses->ses_key[i] = htole32(ses->ses_key[i]); } static void safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen) { MD5_CTX md5ctx; SHA1_CTX sha1ctx; int i; for (i = 0; i < klen; i++) key[i] ^= HMAC_IPAD_VAL; if (algo == CRYPTO_MD5_HMAC) { MD5Init(&md5ctx); MD5Update(&md5ctx, key, klen); MD5Update(&md5ctx, hmac_ipad_buffer, MD5_BLOCK_LEN - klen); bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state)); } else { SHA1Init(&sha1ctx); SHA1Update(&sha1ctx, key, klen); SHA1Update(&sha1ctx, hmac_ipad_buffer, SHA1_BLOCK_LEN - klen); bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32)); } for (i = 0; i < klen; i++) key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL); if (algo == CRYPTO_MD5_HMAC) { MD5Init(&md5ctx); MD5Update(&md5ctx, key, klen); MD5Update(&md5ctx, hmac_opad_buffer, MD5_BLOCK_LEN - klen); bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state)); } else { SHA1Init(&sha1ctx); SHA1Update(&sha1ctx, key, klen); SHA1Update(&sha1ctx, hmac_opad_buffer, SHA1_BLOCK_LEN - klen); bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32)); } for (i = 0; i < klen; i++) key[i] ^= HMAC_OPAD_VAL; /* PE is little-endian, insure proper byte order */ for (i = 0; i < N(ses->ses_hminner); i++) { ses->ses_hminner[i] = htole32(ses->ses_hminner[i]); ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]); } } #undef N /* * Allocate a new 'session' and return an encoded session id. 'sidp' * contains our registration id, and should contain an encoded session * id on successful allocation. 
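safe_setup_mackey(), shown a little earlier in this file, precomputes the HMAC inner and outer hash states once per session so per-packet work only has to continue those states. Independent of this hardware, the computation is the standard RFC 2104 ipad/opad construction with the constants 0x36 and 0x5c; the generic sketch below uses hash_ctx, hash_init() and hash_update() as stand-ins for the MD5/SHA1 primitives actually used, and is an illustration rather than driver code:

	static void
	hmac_precompute(struct hash_ctx *inner, struct hash_ctx *outer,
	    const uint8_t *key, size_t klen, size_t blocklen)
	{
		uint8_t pad[128];	/* large enough for any block size used here */
		size_t i;

		/* Inner state: absorb one block of (key ^ ipad). */
		for (i = 0; i < blocklen; i++)
			pad[i] = (i < klen ? key[i] : 0) ^ 0x36;
		hash_init(inner);
		hash_update(inner, pad, blocklen);

		/* Outer state: absorb one block of (key ^ opad). */
		for (i = 0; i < blocklen; i++)
			pad[i] = (i < klen ? key[i] : 0) ^ 0x5c;
		hash_init(outer);
		hash_update(outer, pad, blocklen);
	}

Only these two block-sized states need to be kept per session (ses_hminner and ses_hmouter above); the packet engine then finishes HMAC(key, msg) = H(key^opad || H(key^ipad || msg)) without ever seeing the raw key again.
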
*/ static int -safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri) +safe_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri) { struct safe_softc *sc = device_get_softc(dev); struct cryptoini *c, *encini = NULL, *macini = NULL; struct safe_session *ses = NULL; - int sesn; - if (sidp == NULL || cri == NULL || sc == NULL) + if (cri == NULL || sc == NULL) return (EINVAL); for (c = cri; c != NULL; c = c->cri_next) { if (c->cri_alg == CRYPTO_MD5_HMAC || c->cri_alg == CRYPTO_SHA1_HMAC || c->cri_alg == CRYPTO_NULL_HMAC) { if (macini) return (EINVAL); macini = c; } else if (c->cri_alg == CRYPTO_DES_CBC || c->cri_alg == CRYPTO_3DES_CBC || c->cri_alg == CRYPTO_AES_CBC || c->cri_alg == CRYPTO_NULL_CBC) { if (encini) return (EINVAL); encini = c; } else return (EINVAL); } if (encini == NULL && macini == NULL) return (EINVAL); if (encini) { /* validate key length */ switch (encini->cri_alg) { case CRYPTO_DES_CBC: if (encini->cri_klen != 64) return (EINVAL); break; case CRYPTO_3DES_CBC: if (encini->cri_klen != 192) return (EINVAL); break; case CRYPTO_AES_CBC: if (encini->cri_klen != 128 && encini->cri_klen != 192 && encini->cri_klen != 256) return (EINVAL); break; } } - if (sc->sc_sessions == NULL) { - ses = sc->sc_sessions = (struct safe_session *)malloc( - sizeof(struct safe_session), M_DEVBUF, M_NOWAIT); - if (ses == NULL) - return (ENOMEM); - sesn = 0; - sc->sc_nsessions = 1; - } else { - for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { - if (sc->sc_sessions[sesn].ses_used == 0) { - ses = &sc->sc_sessions[sesn]; - break; - } - } - - if (ses == NULL) { - sesn = sc->sc_nsessions; - ses = (struct safe_session *)malloc((sesn + 1) * - sizeof(struct safe_session), M_DEVBUF, M_NOWAIT); - if (ses == NULL) - return (ENOMEM); - bcopy(sc->sc_sessions, ses, sesn * - sizeof(struct safe_session)); - bzero(sc->sc_sessions, sesn * - sizeof(struct safe_session)); - free(sc->sc_sessions, M_DEVBUF); - sc->sc_sessions = ses; - ses = &sc->sc_sessions[sesn]; - sc->sc_nsessions++; - } - } - - bzero(ses, sizeof(struct safe_session)); - ses->ses_used = 1; - + ses = crypto_get_driver_session(cses); if (encini) { /* get an IV */ /* XXX may read fewer than requested */ read_random(ses->ses_iv, sizeof(ses->ses_iv)); ses->ses_klen = encini->cri_klen; if (encini->cri_key != NULL) safe_setup_enckey(ses, encini->cri_key); } if (macini) { ses->ses_mlen = macini->cri_mlen; if (ses->ses_mlen == 0) { if (macini->cri_alg == CRYPTO_MD5_HMAC) ses->ses_mlen = MD5_HASH_LEN; else ses->ses_mlen = SHA1_HASH_LEN; } if (macini->cri_key != NULL) { safe_setup_mackey(ses, macini->cri_alg, macini->cri_key, macini->cri_klen / 8); } } - *sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn); return (0); } -/* - * Deallocate a session. 
- */ -static int -safe_freesession(device_t dev, u_int64_t tid) -{ - struct safe_softc *sc = device_get_softc(dev); - int session, ret; - u_int32_t sid = ((u_int32_t) tid) & 0xffffffff; - - if (sc == NULL) - return (EINVAL); - - session = SAFE_SESSION(sid); - if (session < sc->sc_nsessions) { - bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session])); - ret = 0; - } else - ret = EINVAL; - return (ret); -} - static void safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error) { struct safe_operand *op = arg; DPRINTF(("%s: mapsize %u nsegs %d error %d\n", __func__, (u_int) mapsize, nsegs, error)); if (error != 0) return; op->mapsize = mapsize; op->nsegs = nsegs; bcopy(seg, op->segs, nsegs * sizeof (seg[0])); } static int safe_process(device_t dev, struct cryptop *crp, int hint) { struct safe_softc *sc = device_get_softc(dev); int err = 0, i, nicealign, uniform; struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; int bypass, oplen, ivsize; caddr_t iv; int16_t coffset; struct safe_session *ses; struct safe_ringentry *re; struct safe_sarec *sa; struct safe_pdesc *pd; u_int32_t cmd0, cmd1, staterec; if (crp == NULL || crp->crp_callback == NULL || sc == NULL) { safestats.st_invalid++; return (EINVAL); } - if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) { - safestats.st_badsession++; - return (EINVAL); - } mtx_lock(&sc->sc_ringmtx); if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) { safestats.st_ringfull++; sc->sc_needwakeup |= CRYPTO_SYMQ; mtx_unlock(&sc->sc_ringmtx); return (ERESTART); } re = sc->sc_front; staterec = re->re_sa.sa_staterec; /* save */ /* NB: zero everything but the PE descriptor */ bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc)); re->re_sa.sa_staterec = staterec; /* restore */ re->re_crp = crp; - re->re_sesn = SAFE_SESSION(crp->crp_sid); if (crp->crp_flags & CRYPTO_F_IMBUF) { re->re_src_m = (struct mbuf *)crp->crp_buf; re->re_dst_m = (struct mbuf *)crp->crp_buf; } else if (crp->crp_flags & CRYPTO_F_IOV) { re->re_src_io = (struct uio *)crp->crp_buf; re->re_dst_io = (struct uio *)crp->crp_buf; } else { safestats.st_badflags++; err = EINVAL; goto errout; /* XXX we don't handle contiguous blocks! 
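 * Only CRYPTO_F_IMBUF and CRYPTO_F_IOV requests are accepted above;
 * anything else lands on this error path, bumping st_badflags and
 * failing the request with EINVAL.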
*/ } sa = &re->re_sa; - ses = &sc->sc_sessions[re->re_sesn]; + ses = crypto_get_driver_session(crp->crp_session); crd1 = crp->crp_desc; if (crd1 == NULL) { safestats.st_nodesc++; err = EINVAL; goto errout; } crd2 = crd1->crd_next; cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */ cmd1 = 0; if (crd2 == NULL) { if (crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_NULL_HMAC) { maccrd = crd1; enccrd = NULL; cmd0 |= SAFE_SA_CMD0_OP_HASH; } else if (crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC || crd1->crd_alg == CRYPTO_NULL_CBC) { maccrd = NULL; enccrd = crd1; cmd0 |= SAFE_SA_CMD0_OP_CRYPT; } else { safestats.st_badalg++; err = EINVAL; goto errout; } } else { if ((crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_NULL_HMAC) && (crd2->crd_alg == CRYPTO_DES_CBC || crd2->crd_alg == CRYPTO_3DES_CBC || crd2->crd_alg == CRYPTO_AES_CBC || crd2->crd_alg == CRYPTO_NULL_CBC) && ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { maccrd = crd1; enccrd = crd2; } else if ((crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC || crd1->crd_alg == CRYPTO_NULL_CBC) && (crd2->crd_alg == CRYPTO_MD5_HMAC || crd2->crd_alg == CRYPTO_SHA1_HMAC || crd2->crd_alg == CRYPTO_NULL_HMAC) && (crd1->crd_flags & CRD_F_ENCRYPT)) { enccrd = crd1; maccrd = crd2; } else { safestats.st_badalg++; err = EINVAL; goto errout; } cmd0 |= SAFE_SA_CMD0_OP_BOTH; } if (enccrd) { if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) safe_setup_enckey(ses, enccrd->crd_key); if (enccrd->crd_alg == CRYPTO_DES_CBC) { cmd0 |= SAFE_SA_CMD0_DES; cmd1 |= SAFE_SA_CMD1_CBC; ivsize = 2*sizeof(u_int32_t); } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) { cmd0 |= SAFE_SA_CMD0_3DES; cmd1 |= SAFE_SA_CMD1_CBC; ivsize = 2*sizeof(u_int32_t); } else if (enccrd->crd_alg == CRYPTO_AES_CBC) { cmd0 |= SAFE_SA_CMD0_AES; cmd1 |= SAFE_SA_CMD1_CBC; if (ses->ses_klen == 128) cmd1 |= SAFE_SA_CMD1_AES128; else if (ses->ses_klen == 192) cmd1 |= SAFE_SA_CMD1_AES192; else cmd1 |= SAFE_SA_CMD1_AES256; ivsize = 4*sizeof(u_int32_t); } else { cmd0 |= SAFE_SA_CMD0_CRYPT_NULL; ivsize = 0; } /* * Setup encrypt/decrypt state. When using basic ops * we can't use an inline IV because hash/crypt offset * must be from the end of the IV to the start of the * crypt data and this leaves out the preceding header * from the hash calculation. Instead we place the IV * in the state record and set the hash/crypt offset to * copy both the header+IV. */ if (enccrd->crd_flags & CRD_F_ENCRYPT) { cmd0 |= SAFE_SA_CMD0_OUTBOUND; if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) iv = enccrd->crd_iv; else iv = (caddr_t) ses->ses_iv; if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivsize, iv); } bcopy(iv, re->re_sastate.sa_saved_iv, ivsize); cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV; re->re_flags |= SAFE_QFLAGS_COPYOUTIV; } else { cmd0 |= SAFE_SA_CMD0_INBOUND; if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) { bcopy(enccrd->crd_iv, re->re_sastate.sa_saved_iv, ivsize); } else { crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivsize, (caddr_t)re->re_sastate.sa_saved_iv); } cmd0 |= SAFE_SA_CMD0_IVLD_STATE; } /* * For basic encryption use the zero pad algorithm. * This pads results to an 8-byte boundary and * suppresses padding verification for inbound (i.e. * decrypt) operations. * * NB: Not sure if the 8-byte pad boundary is a problem. 
*/ cmd0 |= SAFE_SA_CMD0_PAD_ZERO; /* XXX assert key bufs have the same size */ bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key)); } if (maccrd) { if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) { safe_setup_mackey(ses, maccrd->crd_alg, maccrd->crd_key, maccrd->crd_klen / 8); } if (maccrd->crd_alg == CRYPTO_MD5_HMAC) { cmd0 |= SAFE_SA_CMD0_MD5; cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */ } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) { cmd0 |= SAFE_SA_CMD0_SHA1; cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */ } else { cmd0 |= SAFE_SA_CMD0_HASH_NULL; } /* * Digest data is loaded from the SA and the hash * result is saved to the state block where we * retrieve it for return to the caller. */ /* XXX assert digest bufs have the same size */ bcopy(ses->ses_hminner, sa->sa_indigest, sizeof(sa->sa_indigest)); bcopy(ses->ses_hmouter, sa->sa_outdigest, sizeof(sa->sa_outdigest)); cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH; re->re_flags |= SAFE_QFLAGS_COPYOUTICV; } if (enccrd && maccrd) { /* * The offset from hash data to the start of * crypt data is the difference in the skips. */ bypass = maccrd->crd_skip; coffset = enccrd->crd_skip - maccrd->crd_skip; if (coffset < 0) { DPRINTF(("%s: hash does not precede crypt; " "mac skip %u enc skip %u\n", __func__, maccrd->crd_skip, enccrd->crd_skip)); safestats.st_skipmismatch++; err = EINVAL; goto errout; } oplen = enccrd->crd_skip + enccrd->crd_len; if (maccrd->crd_skip + maccrd->crd_len != oplen) { DPRINTF(("%s: hash amount %u != crypt amount %u\n", __func__, maccrd->crd_skip + maccrd->crd_len, oplen)); safestats.st_lenmismatch++; err = EINVAL; goto errout; } #ifdef SAFE_DEBUG if (safe_debug) { printf("mac: skip %d, len %d, inject %d\n", maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject); printf("enc: skip %d, len %d, inject %d\n", enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject); printf("bypass %d coffset %d oplen %d\n", bypass, coffset, oplen); } #endif if (coffset & 3) { /* offset must be 32-bit aligned */ DPRINTF(("%s: coffset %u misaligned\n", __func__, coffset)); safestats.st_coffmisaligned++; err = EINVAL; goto errout; } coffset >>= 2; if (coffset > 255) { /* offset must be <256 dwords */ DPRINTF(("%s: coffset %u too big\n", __func__, coffset)); safestats.st_cofftoobig++; err = EINVAL; goto errout; } /* * Tell the hardware to copy the header to the output. * The header is defined as the data from the end of * the bypass to the start of data to be encrypted. * Typically this is the inline IV. Note that you need * to do this even if src+dst are the same; it appears * that w/o this bit the crypted data is written * immediately after the bypass data. */ cmd1 |= SAFE_SA_CMD1_HDRCOPY; /* * Disable IP header mutable bit handling. This is * needed to get correct HMAC calculations. 
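 *
 * A purely hypothetical example of the offset math above: with
 * maccrd->crd_skip = 8, maccrd->crd_len = 80, enccrd->crd_skip = 24
 * and enccrd->crd_len = 64, the request is set up with bypass = 8,
 * oplen = 88 and coffset = 16 bytes (4 dwords after the >> 2); the
 * first 8 bytes are skipped entirely, bytes 8-23 are hashed but not
 * encrypted, and bytes 24-87 are both hashed and encrypted.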
*/ cmd1 |= SAFE_SA_CMD1_MUTABLE; } else { if (enccrd) { bypass = enccrd->crd_skip; oplen = bypass + enccrd->crd_len; } else { bypass = maccrd->crd_skip; oplen = bypass + maccrd->crd_len; } coffset = 0; } /* XXX verify multiple of 4 when using s/g */ if (bypass > 96) { /* bypass offset must be <= 96 bytes */ DPRINTF(("%s: bypass %u too big\n", __func__, bypass)); safestats.st_bypasstoobig++; err = EINVAL; goto errout; } if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (bus_dmamap_load_mbuf(sc->sc_srcdmat, re->re_src_map, re->re_src_m, safe_op_cb, &re->re_src, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); re->re_src_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } } else if (crp->crp_flags & CRYPTO_F_IOV) { if (bus_dmamap_load_uio(sc->sc_srcdmat, re->re_src_map, re->re_src_io, safe_op_cb, &re->re_src, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); re->re_src_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } } nicealign = safe_dmamap_aligned(&re->re_src); uniform = safe_dmamap_uniform(&re->re_src); DPRINTF(("src nicealign %u uniform %u nsegs %u\n", nicealign, uniform, re->re_src.nsegs)); if (re->re_src.nsegs > 1) { re->re_desc.d_src = sc->sc_spalloc.dma_paddr + ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring); for (i = 0; i < re->re_src_nsegs; i++) { /* NB: no need to check if there's space */ pd = sc->sc_spfree; if (++(sc->sc_spfree) == sc->sc_springtop) sc->sc_spfree = sc->sc_spring; KASSERT((pd->pd_flags&3) == 0 || (pd->pd_flags&3) == SAFE_PD_DONE, ("bogus source particle descriptor; flags %x", pd->pd_flags)); pd->pd_addr = re->re_src_segs[i].ds_addr; pd->pd_size = re->re_src_segs[i].ds_len; pd->pd_flags = SAFE_PD_READY; } cmd0 |= SAFE_SA_CMD0_IGATHER; } else { /* * No need for gather, reference the operand directly. */ re->re_desc.d_src = re->re_src_segs[0].ds_addr; } if (enccrd == NULL && maccrd != NULL) { /* * Hash op; no destination needed. */ } else { if (crp->crp_flags & CRYPTO_F_IOV) { if (!nicealign) { safestats.st_iovmisaligned++; err = EINVAL; goto errout; } if (uniform != 1) { /* * Source is not suitable for direct use as * the destination. Create a new scatter/gather * list based on the destination requirements * and check if that's ok. */ if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT, &re->re_dst_map)) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_uio(sc->sc_dstdmat, re->re_dst_map, re->re_dst_io, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); re->re_dst_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } uniform = safe_dmamap_uniform(&re->re_dst); if (!uniform) { /* * There's no way to handle the DMA * requirements with this uio. We * could create a separate DMA area for * the result and then copy it back, * but for now we just bail and return * an error. Note that uio requests * > SAFE_MAX_DSIZE are handled because * the DMA map and segment list for the * destination wil result in a * destination particle list that does * the necessary scatter DMA. */ safestats.st_iovnotuniform++; err = EINVAL; goto errout; } } else re->re_dst = re->re_src; } else if (crp->crp_flags & CRYPTO_F_IMBUF) { if (nicealign && uniform == 1) { /* * Source layout is suitable for direct * sharing of the DMA map and segment list. 
*/ re->re_dst = re->re_src; } else if (nicealign && uniform == 2) { /* * The source is properly aligned but requires a * different particle list to handle DMA of the * result. Create a new map and do the load to * create the segment list. The particle * descriptor setup code below will handle the * rest. */ if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT, &re->re_dst_map)) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_mbuf(sc->sc_dstdmat, re->re_dst_map, re->re_dst_m, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); re->re_dst_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } } else { /* !(aligned and/or uniform) */ int totlen, len; struct mbuf *m, *top, **mp; /* * DMA constraints require that we allocate a * new mbuf chain for the destination. We * allocate an entire new set of mbufs of * optimal/required size and then tell the * hardware to copy any bits that are not * created as a byproduct of the operation. */ if (!nicealign) safestats.st_unaligned++; if (!uniform) safestats.st_notuniform++; totlen = re->re_src_mapsize; if (re->re_src_m->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m, M_NOWAIT, MT_DATA); if (m && !m_dup_pkthdr(m, re->re_src_m, M_NOWAIT)) { m_free(m); m = NULL; } } else { len = MLEN; MGET(m, M_NOWAIT, MT_DATA); } if (m == NULL) { safestats.st_nombuf++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } if (totlen >= MINCLSIZE) { if (!(MCLGET(m, M_NOWAIT))) { m_free(m); safestats.st_nomcl++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } len = MCLBYTES; } m->m_len = len; top = NULL; mp = ⊤ while (totlen > 0) { if (top) { MGET(m, M_NOWAIT, MT_DATA); if (m == NULL) { m_freem(top); safestats.st_nombuf++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } len = MLEN; } if (top && totlen >= MINCLSIZE) { if (!(MCLGET(m, M_NOWAIT))) { *mp = m; m_freem(top); safestats.st_nomcl++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } len = MCLBYTES; } m->m_len = len = min(totlen, len); totlen -= len; *mp = m; mp = &m->m_next; } re->re_dst_m = top; if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT, &re->re_dst_map) != 0) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_mbuf(sc->sc_dstdmat, re->re_dst_map, re->re_dst_m, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); re->re_dst_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } if (re->re_src.mapsize > oplen) { /* * There's data following what the * hardware will copy for us. If this * isn't just the ICV (that's going to * be written on completion), copy it * to the new mbufs */ if (!(maccrd && (re->re_src.mapsize-oplen) == 12 && maccrd->crd_inject == oplen)) safe_mcopy(re->re_src_m, re->re_dst_m, oplen); else safestats.st_noicvcopy++; } } } else { safestats.st_badflags++; err = EINVAL; goto errout; } if (re->re_dst.nsegs > 1) { re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr + ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring); for (i = 0; i < re->re_dst_nsegs; i++) { pd = sc->sc_dpfree; KASSERT((pd->pd_flags&3) == 0 || (pd->pd_flags&3) == SAFE_PD_DONE, ("bogus dest particle descriptor; flags %x", pd->pd_flags)); if (++(sc->sc_dpfree) == sc->sc_dpringtop) sc->sc_dpfree = sc->sc_dpring; pd->pd_addr = re->re_dst_segs[i].ds_addr; pd->pd_flags = SAFE_PD_READY; } cmd0 |= SAFE_SA_CMD0_OSCATTER; } else { /* * No need for scatter, reference the operand directly. 
*/ re->re_desc.d_dst = re->re_dst_segs[0].ds_addr; } } /* * All done with setup; fillin the SA command words * and the packet engine descriptor. The operation * is now ready for submission to the hardware. */ sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI; sa->sa_cmd1 = cmd1 | (coffset << SAFE_SA_CMD1_OFFSET_S) | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */ | SAFE_SA_CMD1_SRPCI ; /* * NB: the order of writes is important here. In case the * chip is scanning the ring because of an outstanding request * it might nab this one too. In that case we need to make * sure the setup is complete before we write the length * field of the descriptor as it signals the descriptor is * ready for processing. */ re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI; if (maccrd) re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL; re->re_desc.d_len = oplen | SAFE_PE_LEN_READY | (bypass << SAFE_PE_LEN_BYPASS_S) ; safestats.st_ipackets++; safestats.st_ibytes += oplen; if (++(sc->sc_front) == sc->sc_ringtop) sc->sc_front = sc->sc_ring; /* XXX honor batching */ safe_feed(sc, re); mtx_unlock(&sc->sc_ringmtx); return (0); errout: if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m)) m_freem(re->re_dst_m); if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) { bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map); bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); } if (re->re_src_map != NULL) { bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map); bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); } mtx_unlock(&sc->sc_ringmtx); if (err != ERESTART) { crp->crp_etype = err; crypto_done(crp); } else { sc->sc_needwakeup |= CRYPTO_SYMQ; } return (err); } static void safe_callback(struct safe_softc *sc, struct safe_ringentry *re) { struct cryptop *crp = (struct cryptop *)re->re_crp; + struct safe_session *ses; struct cryptodesc *crd; + ses = crypto_get_driver_session(crp->crp_session); + safestats.st_opackets++; safestats.st_obytes += re->re_dst.mapsize; safe_dma_sync(&sc->sc_ringalloc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) { device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n", re->re_desc.d_csr, re->re_sa.sa_cmd0, re->re_sa.sa_cmd1); safestats.st_peoperr++; crp->crp_etype = EIO; /* something more meaningful? */ } if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) { bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map); bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); } bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map); bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); /* * If result was written to a differet mbuf chain, swap * it in as the return value and reclaim the original. 
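 * (re_dst_m differs from re_src_m only when safe_process had to
 * allocate a fresh chain because the source was misaligned or not
 * uniform; in that case the original chain is reclaimed here.)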
*/ if ((crp->crp_flags & CRYPTO_F_IMBUF) && re->re_src_m != re->re_dst_m) { m_freem(re->re_src_m); crp->crp_buf = (caddr_t)re->re_dst_m; } if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) { /* copy out IV for future use */ for (crd = crp->crp_desc; crd; crd = crd->crd_next) { int ivsize; if (crd->crd_alg == CRYPTO_DES_CBC || crd->crd_alg == CRYPTO_3DES_CBC) { ivsize = 2*sizeof(u_int32_t); } else if (crd->crd_alg == CRYPTO_AES_CBC) { ivsize = 4*sizeof(u_int32_t); } else continue; crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip + crd->crd_len - ivsize, ivsize, - (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv); + (caddr_t)ses->ses_iv); break; } } if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) { /* copy out ICV result */ for (crd = crp->crp_desc; crd; crd = crd->crd_next) { if (!(crd->crd_alg == CRYPTO_MD5_HMAC || crd->crd_alg == CRYPTO_SHA1_HMAC || crd->crd_alg == CRYPTO_NULL_HMAC)) continue; if (crd->crd_alg == CRYPTO_SHA1_HMAC) { /* * SHA-1 ICV's are byte-swapped; fix 'em up * before copy them to their destination. */ re->re_sastate.sa_saved_indigest[0] = bswap32(re->re_sastate.sa_saved_indigest[0]); re->re_sastate.sa_saved_indigest[1] = bswap32(re->re_sastate.sa_saved_indigest[1]); re->re_sastate.sa_saved_indigest[2] = bswap32(re->re_sastate.sa_saved_indigest[2]); } crypto_copyback(crp->crp_flags, crp->crp_buf, - crd->crd_inject, - sc->sc_sessions[re->re_sesn].ses_mlen, + crd->crd_inject, ses->ses_mlen, (caddr_t)re->re_sastate.sa_saved_indigest); break; } } crypto_done(crp); } /* * Copy all data past offset from srcm to dstm. */ static void safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset) { u_int j, dlen, slen; caddr_t dptr, sptr; /* * Advance src and dst to offset. */ j = offset; while (j >= srcm->m_len) { j -= srcm->m_len; srcm = srcm->m_next; if (srcm == NULL) return; } sptr = mtod(srcm, caddr_t) + j; slen = srcm->m_len - j; j = offset; while (j >= dstm->m_len) { j -= dstm->m_len; dstm = dstm->m_next; if (dstm == NULL) return; } dptr = mtod(dstm, caddr_t) + j; dlen = dstm->m_len - j; /* * Copy everything that remains. */ for (;;) { j = min(slen, dlen); bcopy(sptr, dptr, j); if (slen == j) { srcm = srcm->m_next; if (srcm == NULL) return; sptr = srcm->m_data; slen = srcm->m_len; } else sptr += j, slen -= j; if (dlen == j) { dstm = dstm->m_next; if (dstm == NULL) return; dptr = dstm->m_data; dlen = dstm->m_len; } else dptr += j, dlen -= j; } } #ifndef SAFE_NO_RNG #define SAFE_RNG_MAXWAIT 1000 static void safe_rng_init(struct safe_softc *sc) { u_int32_t w, v; int i; WRITE_REG(sc, SAFE_RNG_CTRL, 0); /* use default value according to the manual */ WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */ WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); /* * There is a bug in rev 1.0 of the 1140 that when the RNG * is brought out of reset the ready status flag does not * work until the RNG has finished its internal initialization. * * So in order to determine the device is through its * initialization we must read the data register, using the * status reg in the read in case it is initialized. Then read * the data register until it changes from the first read. * Once it changes read the data register until it changes * again. At this time the RNG is considered initialized. * This could take between 750ms - 1000ms in time. 
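 * Both polling loops below are bounded by SAFE_RNG_MAXWAIT iterations
 * (with a 10us DELAY per pass), so initialization cannot hang
 * indefinitely if the RNG never produces changing data.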
*/ i = 0; w = READ_REG(sc, SAFE_RNG_OUT); do { v = READ_REG(sc, SAFE_RNG_OUT); if (v != w) { w = v; break; } DELAY(10); } while (++i < SAFE_RNG_MAXWAIT); /* Wait Until data changes again */ i = 0; do { v = READ_REG(sc, SAFE_RNG_OUT); if (v != w) break; DELAY(10); } while (++i < SAFE_RNG_MAXWAIT); } static __inline void safe_rng_disable_short_cycle(struct safe_softc *sc) { WRITE_REG(sc, SAFE_RNG_CTRL, READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN); } static __inline void safe_rng_enable_short_cycle(struct safe_softc *sc) { WRITE_REG(sc, SAFE_RNG_CTRL, READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN); } static __inline u_int32_t safe_rng_read(struct safe_softc *sc) { int i; i = 0; while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT) ; return READ_REG(sc, SAFE_RNG_OUT); } static void safe_rng(void *arg) { struct safe_softc *sc = arg; u_int32_t buf[SAFE_RNG_MAXBUFSIZ]; /* NB: maybe move to softc */ u_int maxwords; int i; safestats.st_rng++; /* * Fetch the next block of data. */ maxwords = safe_rngbufsize; if (maxwords > SAFE_RNG_MAXBUFSIZ) maxwords = SAFE_RNG_MAXBUFSIZ; retry: for (i = 0; i < maxwords; i++) buf[i] = safe_rng_read(sc); /* * Check the comparator alarm count and reset the h/w if * it exceeds our threshold. This guards against the * hardware oscillators resonating with external signals. */ if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) { u_int32_t freq_inc, w; DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__, READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm)); safestats.st_rngalarm++; safe_rng_enable_short_cycle(sc); freq_inc = 18; for (i = 0; i < 64; i++) { w = READ_REG(sc, SAFE_RNG_CNFG); freq_inc = ((w + freq_inc) & 0x3fL); w = ((w & ~0x3fL) | freq_inc); WRITE_REG(sc, SAFE_RNG_CNFG, w); WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); (void) safe_rng_read(sc); DELAY(25); if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) { safe_rng_disable_short_cycle(sc); goto retry; } freq_inc = 1; } safe_rng_disable_short_cycle(sc); } else WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); (*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t)); callout_reset(&sc->sc_rngto, hz * (safe_rnginterval ? 
safe_rnginterval : 1), safe_rng, sc); } #endif /* SAFE_NO_RNG */ static void safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } static int safe_dma_malloc( struct safe_softc *sc, bus_size_t size, struct safe_dma_alloc *dma, int mapflags ) { int r; r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ sizeof(u_int32_t), 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* locking */ &dma->dma_tag); if (r != 0) { device_printf(sc->sc_dev, "safe_dma_malloc: " "bus_dma_tag_create failed; error %u\n", r); goto fail_0; } r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT, &dma->dma_map); if (r != 0) { device_printf(sc->sc_dev, "safe_dma_malloc: " "bus_dmammem_alloc failed; size %ju, error %u\n", (uintmax_t)size, r); goto fail_1; } r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, safe_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (r != 0) { device_printf(sc->sc_dev, "safe_dma_malloc: " "bus_dmamap_load failed; error %u\n", r); goto fail_2; } dma->dma_size = size; return (0); bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); fail_1: bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_tag = NULL; return (r); } static void safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma) { bus_dmamap_unload(dma->dma_tag, dma->dma_map); bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); } /* * Resets the board. Values in the regesters are left as is * from the reset (i.e. initial values are assigned elsewhere). */ static void safe_reset_board(struct safe_softc *sc) { u_int32_t v; /* * Reset the device. The manual says no delay * is needed between marking and clearing reset. */ v = READ_REG(sc, SAFE_PE_DMACFG) &~ (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET | SAFE_PE_DMACFG_SGRESET); WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET | SAFE_PE_DMACFG_SGRESET); WRITE_REG(sc, SAFE_PE_DMACFG, v); } /* * Initialize registers we need to touch only once. */ static void safe_init_board(struct safe_softc *sc) { u_int32_t v, dwords; v = READ_REG(sc, SAFE_PE_DMACFG); v &=~ SAFE_PE_DMACFG_PEMODE; v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */ | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */ | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */ | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */ | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */ | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */ ; WRITE_REG(sc, SAFE_PE_DMACFG, v); #if 0 /* XXX select byte swap based on host byte order */ WRITE_REG(sc, SAFE_ENDIAN, 0x1b); #endif if (sc->sc_chiprev == SAFE_REV(1,0)) { /* * Avoid large PCI DMA transfers. Rev 1.0 has a bug where * "target mode transfers" done while the chip is DMA'ing * >1020 bytes cause the hardware to lockup. To avoid this * we reduce the max PCI transfer size and use small source * particle descriptors (<= 256 bytes). 
*/ WRITE_REG(sc, SAFE_DMA_CFG, 256); device_printf(sc->sc_dev, "Reduce max DMA size to %u words for rev %u.%u WAR\n", (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff, SAFE_REV_MAJ(sc->sc_chiprev), SAFE_REV_MIN(sc->sc_chiprev)); } /* NB: operands+results are overlaid */ WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr); WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr); /* * Configure ring entry size and number of items in the ring. */ KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0, ("PE ring entry not 32-bit aligned!")); dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t); WRITE_REG(sc, SAFE_PE_RINGCFG, (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE); WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */ WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr); WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr); WRITE_REG(sc, SAFE_PE_PARTSIZE, (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART); /* * NB: destination particles are fixed size. We use * an mbuf cluster and require all results go to * clusters or smaller. */ WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE); /* it's now safe to enable PE mode, do it */ WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE); /* * Configure hardware to use level-triggered interrupts and * to interrupt after each descriptor is processed. */ WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL); WRITE_REG(sc, SAFE_HI_DESC_CNT, 1); WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR); } /* * Init PCI registers */ static void safe_init_pciregs(device_t dev) { } /* * Clean up after a chip crash. * It is assumed that the caller in splimp() */ static void safe_cleanchip(struct safe_softc *sc) { if (sc->sc_nqchip != 0) { struct safe_ringentry *re = sc->sc_back; while (re != sc->sc_front) { if (re->re_desc.d_csr != 0) safe_free_entry(sc, re); if (++re == sc->sc_ringtop) re = sc->sc_ring; } sc->sc_back = re; sc->sc_nqchip = 0; } } /* * free a safe_q * It is assumed that the caller is within splimp(). */ static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re) { struct cryptop *crp; /* * Free header MCR */ if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m)) m_freem(re->re_dst_m); crp = (struct cryptop *)re->re_crp; re->re_desc.d_csr = 0; crp->crp_etype = EFAULT; crypto_done(crp); return(0); } /* * Routine to reset the chip and clean up. * It is assumed that the caller is in splimp() */ static void safe_totalreset(struct safe_softc *sc) { safe_reset_board(sc); safe_init_board(sc); safe_cleanchip(sc); } /* * Is the operand suitable aligned for direct DMA. Each * segment must be aligned on a 32-bit boundary and all * but the last segment must be a multiple of 4 bytes. */ static int safe_dmamap_aligned(const struct safe_operand *op) { int i; for (i = 0; i < op->nsegs; i++) { if (op->segs[i].ds_addr & 3) return (0); if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3)) return (0); } return (1); } /* * Is the operand suitable for direct DMA as the destination * of an operation. The hardware requires that each ``particle'' * but the last in an operation result have the same size. We * fix that size at SAFE_MAX_DSIZE bytes. This routine returns * 0 if some segment is not a multiple of of this size, 1 if all * segments are exactly this size, or 2 if segments are at worst * a multple of this size. 
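 *
 * safe_process() uses the distinction: a return of 1 lets the
 * destination simply share the source DMA map, a return of 2 makes it
 * build a separate destination map over the same data, and a return
 * of 0 for an mbuf source forces the result to be gathered into a
 * freshly allocated mbuf chain (a uio source that cannot be made
 * uniform is rejected instead).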
*/ static int safe_dmamap_uniform(const struct safe_operand *op) { int result = 1; if (op->nsegs > 0) { int i; for (i = 0; i < op->nsegs-1; i++) { if (op->segs[i].ds_len % SAFE_MAX_DSIZE) return (0); if (op->segs[i].ds_len != SAFE_MAX_DSIZE) result = 2; } } return (result); } #ifdef SAFE_DEBUG static void safe_dump_dmastatus(struct safe_softc *sc, const char *tag) { printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n" , tag , READ_REG(sc, SAFE_DMA_ENDIAN) , READ_REG(sc, SAFE_DMA_SRCADDR) , READ_REG(sc, SAFE_DMA_DSTADDR) , READ_REG(sc, SAFE_DMA_STAT) ); } static void safe_dump_intrstate(struct safe_softc *sc, const char *tag) { printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n" , tag , READ_REG(sc, SAFE_HI_CFG) , READ_REG(sc, SAFE_HI_MASK) , READ_REG(sc, SAFE_HI_DESC_CNT) , READ_REG(sc, SAFE_HU_STAT) , READ_REG(sc, SAFE_HM_STAT) ); } static void safe_dump_ringstate(struct safe_softc *sc, const char *tag) { u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT); /* NB: assume caller has lock on ring */ printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n", tag, estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S), (unsigned long)(sc->sc_back - sc->sc_ring), (unsigned long)(sc->sc_front - sc->sc_ring)); } static void safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re) { int ix, nsegs; ix = re - sc->sc_ring; printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n" , tag , re, ix , re->re_desc.d_csr , re->re_desc.d_src , re->re_desc.d_dst , re->re_desc.d_sa , re->re_desc.d_len ); if (re->re_src.nsegs > 1) { ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) / sizeof(struct safe_pdesc); for (nsegs = re->re_src.nsegs; nsegs; nsegs--) { printf(" spd[%u] %p: %p size %u flags %x" , ix, &sc->sc_spring[ix] , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr , sc->sc_spring[ix].pd_size , sc->sc_spring[ix].pd_flags ); if (sc->sc_spring[ix].pd_size == 0) printf(" (zero!)"); printf("\n"); if (++ix == SAFE_TOTAL_SPART) ix = 0; } } if (re->re_dst.nsegs > 1) { ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) / sizeof(struct safe_pdesc); for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) { printf(" dpd[%u] %p: %p flags %x\n" , ix, &sc->sc_dpring[ix] , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr , sc->sc_dpring[ix].pd_flags ); if (++ix == SAFE_TOTAL_DPART) ix = 0; } } printf("sa: cmd0 %08x cmd1 %08x staterec %x\n", re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec); printf("sa: key %x %x %x %x %x %x %x %x\n" , re->re_sa.sa_key[0] , re->re_sa.sa_key[1] , re->re_sa.sa_key[2] , re->re_sa.sa_key[3] , re->re_sa.sa_key[4] , re->re_sa.sa_key[5] , re->re_sa.sa_key[6] , re->re_sa.sa_key[7] ); printf("sa: indigest %x %x %x %x %x\n" , re->re_sa.sa_indigest[0] , re->re_sa.sa_indigest[1] , re->re_sa.sa_indigest[2] , re->re_sa.sa_indigest[3] , re->re_sa.sa_indigest[4] ); printf("sa: outdigest %x %x %x %x %x\n" , re->re_sa.sa_outdigest[0] , re->re_sa.sa_outdigest[1] , re->re_sa.sa_outdigest[2] , re->re_sa.sa_outdigest[3] , re->re_sa.sa_outdigest[4] ); printf("sr: iv %x %x %x %x\n" , re->re_sastate.sa_saved_iv[0] , re->re_sastate.sa_saved_iv[1] , re->re_sastate.sa_saved_iv[2] , re->re_sastate.sa_saved_iv[3] ); printf("sr: hashbc %u indigest %x %x %x %x %x\n" , re->re_sastate.sa_saved_hashbc , re->re_sastate.sa_saved_indigest[0] , re->re_sastate.sa_saved_indigest[1] , re->re_sastate.sa_saved_indigest[2] , re->re_sastate.sa_saved_indigest[3] , re->re_sastate.sa_saved_indigest[4] ); } static void safe_dump_ring(struct safe_softc *sc, const char *tag) { 
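	/*
	 * Dump the interrupt, DMA and ring state, then walk any entries
	 * still queued to the chip (sc_back up to sc_front) while
	 * holding the ring lock.
	 */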
mtx_lock(&sc->sc_ringmtx); printf("\nSafeNet Ring State:\n"); safe_dump_intrstate(sc, tag); safe_dump_dmastatus(sc, tag); safe_dump_ringstate(sc, tag); if (sc->sc_nqchip) { struct safe_ringentry *re = sc->sc_back; do { safe_dump_request(sc, tag, re); if (++re == sc->sc_ringtop) re = sc->sc_ring; } while (re != sc->sc_front); } mtx_unlock(&sc->sc_ringmtx); } static int sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS) { char dmode[64]; int error; strncpy(dmode, "", sizeof(dmode) - 1); dmode[sizeof(dmode) - 1] = '\0'; error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req); if (error == 0 && req->newptr != NULL) { struct safe_softc *sc = safec; if (!sc) return EINVAL; if (strncmp(dmode, "dma", 3) == 0) safe_dump_dmastatus(sc, "safe0"); else if (strncmp(dmode, "int", 3) == 0) safe_dump_intrstate(sc, "safe0"); else if (strncmp(dmode, "ring", 4) == 0) safe_dump_ring(sc, "safe0"); else return EINVAL; } return error; } SYSCTL_PROC(_hw_safe, OID_AUTO, dump, CTLTYPE_STRING | CTLFLAG_RW, 0, 0, sysctl_hw_safe_dump, "A", "Dump driver state"); #endif /* SAFE_DEBUG */ Index: head/sys/dev/safe/safevar.h =================================================================== --- head/sys/dev/safe/safevar.h (revision 336438) +++ head/sys/dev/safe/safevar.h (revision 336439) @@ -1,221 +1,214 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Sam Leffler, Errno Consulting * Copyright (c) 2003 Global Technology Associates, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _SAFE_SAFEVAR_H_ #define _SAFE_SAFEVAR_H_ /* Maximum queue length */ #ifndef SAFE_MAX_NQUEUE #define SAFE_MAX_NQUEUE 60 #endif #define SAFE_MAX_PART 64 /* Maximum scatter/gather depth */ #define SAFE_DMA_BOUNDARY 0 /* No boundary for source DMA ops */ #define SAFE_MAX_DSIZE MCLBYTES /* Fixed scatter particle size */ #define SAFE_MAX_SSIZE 0x0ffff /* Maximum gather particle size */ #define SAFE_MAX_DMA 0xfffff /* Maximum PE operand size (20 bits) */ /* total src+dst particle descriptors */ #define SAFE_TOTAL_DPART (SAFE_MAX_NQUEUE * SAFE_MAX_PART) #define SAFE_TOTAL_SPART (SAFE_MAX_NQUEUE * SAFE_MAX_PART) #define SAFE_RNG_MAXBUFSIZ 128 /* 32-bit words */ -#define SAFE_CARD(sid) (((sid) & 0xf0000000) >> 28) -#define SAFE_SESSION(sid) ( (sid) & 0x0fffffff) -#define SAFE_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff)) - #define SAFE_DEF_RTY 0xff /* PCI Retry Timeout */ #define SAFE_DEF_TOUT 0xff /* PCI TRDY Timeout */ #define SAFE_DEF_CACHELINE 0x01 /* Cache Line setting */ #ifdef _KERNEL /* * State associated with the allocation of each chunk * of memory setup for DMA. */ struct safe_dma_alloc { u_int32_t dma_paddr; /* physical address */ caddr_t dma_vaddr; /* virtual address */ bus_dma_tag_t dma_tag; /* bus dma tag used */ bus_dmamap_t dma_map; /* associated map */ bus_dma_segment_t dma_seg; bus_size_t dma_size; /* mapped memory size (bytes) */ int dma_nseg; /* number of segments */ }; /* * Cryptographic operand state. One of these exists for each * source and destination operand passed in from the crypto * subsystem. When possible source and destination operands * refer to the same memory. More often they are distinct. * We track the virtual address of each operand as well as * where each is mapped for DMA. */ struct safe_operand { union { struct mbuf *m; struct uio *io; } u; bus_dmamap_t map; bus_size_t mapsize; int nsegs; bus_dma_segment_t segs[SAFE_MAX_PART]; }; /* * Packet engine ring entry and cryptographic operation state. * The packet engine requires a ring of descriptors that contain * pointers to various cryptographic state. However the ring * configuration register allows you to specify an arbitrary size * for ring entries. We use this feature to collect most of the * state for each cryptographic request into one spot. Other than * ring entries only the ``particle descriptors'' (scatter/gather * lists) and the actual operand data are kept separate. The * particle descriptors must also be organized in rings. The * operand data can be located aribtrarily (modulo alignment constraints). * * Note that the descriptor ring is mapped onto the PCI bus so * the hardware can DMA data. This means the entire ring must be * contiguous. 
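 * The entry size is not fixed by the hardware: safe_init_board()
 * programs sizeof(struct safe_ringentry), in 32-bit words, into
 * SAFE_PE_RINGCFG so the packet engine steps over the extra state
 * embedded in each entry.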
*/ struct safe_ringentry { struct safe_desc re_desc; /* command descriptor */ struct safe_sarec re_sa; /* SA record */ struct safe_sastate re_sastate; /* SA state record */ struct cryptop *re_crp; /* crypto operation */ struct safe_operand re_src; /* source operand */ struct safe_operand re_dst; /* destination operand */ - int re_sesn; /* crypto session ID */ + int unused; int re_flags; #define SAFE_QFLAGS_COPYOUTIV 0x1 /* copy back on completion */ #define SAFE_QFLAGS_COPYOUTICV 0x2 /* copy back on completion */ }; #define re_src_m re_src.u.m #define re_src_io re_src.u.io #define re_src_map re_src.map #define re_src_nsegs re_src.nsegs #define re_src_segs re_src.segs #define re_src_mapsize re_src.mapsize #define re_dst_m re_dst.u.m #define re_dst_io re_dst.u.io #define re_dst_map re_dst.map #define re_dst_nsegs re_dst.nsegs #define re_dst_segs re_dst.segs #define re_dst_mapsize re_dst.mapsize struct rndstate_test; struct safe_session { - u_int32_t ses_used; u_int32_t ses_klen; /* key length in bits */ u_int32_t ses_key[8]; /* DES/3DES/AES key */ u_int32_t ses_mlen; /* hmac length in bytes */ u_int32_t ses_hminner[5]; /* hmac inner state */ u_int32_t ses_hmouter[5]; /* hmac outer state */ u_int32_t ses_iv[4]; /* DES/3DES/AES iv */ }; struct safe_softc { device_t sc_dev; /* device backpointer */ struct resource *sc_irq; void *sc_ih; /* interrupt handler cookie */ bus_space_handle_t sc_sh; /* memory handle */ bus_space_tag_t sc_st; /* memory tag */ struct resource *sc_sr; /* memory resource */ bus_dma_tag_t sc_srcdmat; /* source dma tag */ bus_dma_tag_t sc_dstdmat; /* destination dma tag */ u_int sc_chiprev; /* major/minor chip revision */ int sc_flags; /* device specific flags */ #define SAFE_FLAGS_KEY 0x01 /* has key accelerator */ #define SAFE_FLAGS_RNG 0x02 /* hardware rng */ int sc_suspended; int sc_needwakeup; /* notify crypto layer */ int32_t sc_cid; /* crypto tag */ struct safe_dma_alloc sc_ringalloc; /* PE ring allocation state */ struct safe_ringentry *sc_ring; /* PE ring */ struct safe_ringentry *sc_ringtop; /* PE ring top */ struct safe_ringentry *sc_front; /* next free entry */ struct safe_ringentry *sc_back; /* next pending entry */ int sc_nqchip; /* # passed to chip */ struct mtx sc_ringmtx; /* PE ring lock */ struct safe_pdesc *sc_spring; /* src particle ring */ struct safe_pdesc *sc_springtop; /* src particle ring top */ struct safe_pdesc *sc_spfree; /* next free src particle */ struct safe_dma_alloc sc_spalloc; /* src particle ring state */ struct safe_pdesc *sc_dpring; /* dest particle ring */ struct safe_pdesc *sc_dpringtop; /* dest particle ring top */ struct safe_pdesc *sc_dpfree; /* next free dest particle */ struct safe_dma_alloc sc_dpalloc; /* dst particle ring state */ - int sc_nsessions; /* # of sessions */ - struct safe_session *sc_sessions; /* sessions */ struct callout sc_rngto; /* rng timeout */ struct rndtest_state *sc_rndtest; /* RNG test state */ void (*sc_harvest)(struct rndtest_state *, void *, u_int); }; #endif /* _KERNEL */ struct safe_stats { u_int64_t st_ibytes; u_int64_t st_obytes; u_int32_t st_ipackets; u_int32_t st_opackets; u_int32_t st_invalid; /* invalid argument */ u_int32_t st_badsession; /* invalid session id */ u_int32_t st_badflags; /* flags indicate !(mbuf | uio) */ u_int32_t st_nodesc; /* op submitted w/o descriptors */ u_int32_t st_badalg; /* unsupported algorithm */ u_int32_t st_ringfull; /* PE descriptor ring full */ u_int32_t st_peoperr; /* PE marked error */ u_int32_t st_dmaerr; /* PE DMA error */ u_int32_t st_bypasstoobig; /* bypass > 
96 bytes */ u_int32_t st_skipmismatch; /* enc part begins before auth part */ u_int32_t st_lenmismatch; /* enc length different auth length */ u_int32_t st_coffmisaligned; /* crypto offset not 32-bit aligned */ u_int32_t st_cofftoobig; /* crypto offset > 255 words */ u_int32_t st_iovmisaligned; /* iov op not aligned */ u_int32_t st_iovnotuniform; /* iov op not suitable */ u_int32_t st_unaligned; /* unaligned src caused copy */ u_int32_t st_notuniform; /* non-uniform src caused copy */ u_int32_t st_nomap; /* bus_dmamap_create failed */ u_int32_t st_noload; /* bus_dmamap_load_* failed */ u_int32_t st_nombuf; /* MGET* failed */ u_int32_t st_nomcl; /* MCLGET* failed */ u_int32_t st_maxqchip; /* max mcr1 ops out for processing */ u_int32_t st_rng; /* RNG requests */ u_int32_t st_rngalarm; /* RNG alarm requests */ u_int32_t st_noicvcopy; /* ICV data copies suppressed */ }; #endif /* _SAFE_SAFEVAR_H_ */ Index: head/sys/dev/sec/sec.c =================================================================== --- head/sys/dev/sec/sec.c (revision 336438) +++ head/sys/dev/sec/sec.c (revision 336439) @@ -1,1878 +1,1785 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and * 3.0 are supported. 
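 * SEC 3.1 controllers are probed as well and handled as version 3.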
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include static int sec_probe(device_t dev); static int sec_attach(device_t dev); static int sec_detach(device_t dev); static int sec_suspend(device_t dev); static int sec_resume(device_t dev); static int sec_shutdown(device_t dev); static void sec_primary_intr(void *arg); static void sec_secondary_intr(void *arg); static int sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand, int *irid, driver_intr_t handler, const char *iname); static void sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand, int irid, const char *iname); static int sec_controller_reset(struct sec_softc *sc); static int sec_channel_reset(struct sec_softc *sc, int channel, int full); static int sec_init(struct sec_softc *sc); static int sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem, bus_size_t size); static int sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, void *mem, bus_size_t size, int type, struct sec_desc_map_info *sdmi); static void sec_free_dma_mem(struct sec_dma_mem *dma_mem); static void sec_enqueue(struct sec_softc *sc); static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel); static int sec_eu_channel(struct sec_softc *sc, int eu); static int sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc, u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype); static int sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize); -static int sec_alloc_session(struct sec_softc *sc); -static int sec_newsession(device_t dev, u_int32_t *sidp, +static int sec_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri); -static int sec_freesession(device_t dev, uint64_t tid); static int sec_process(device_t dev, struct cryptop *crp, int hint); static int sec_split_cri(struct cryptoini *cri, struct cryptoini **enc, struct cryptoini **mac); static int sec_split_crp(struct cryptop *crp, struct cryptodesc **enc, struct cryptodesc **mac); static int sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc, int buftype); static int sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc, struct cryptodesc *mac, int buftype); -static struct sec_session *sec_get_session(struct sec_softc *sc, u_int sid); static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr); /* AESU */ static int sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac); static int sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, int buftype); /* DEU */ static int sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac); static int sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, int buftype); /* MDEU */ static int sec_mdeu_can_handle(u_int alg); static int sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen); static int sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses, struct cryptoini *enc, struct 
cryptoini *mac); static int sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, int buftype); static device_method_t sec_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sec_probe), DEVMETHOD(device_attach, sec_attach), DEVMETHOD(device_detach, sec_detach), DEVMETHOD(device_suspend, sec_suspend), DEVMETHOD(device_resume, sec_resume), DEVMETHOD(device_shutdown, sec_shutdown), /* Crypto methods */ DEVMETHOD(cryptodev_newsession, sec_newsession), - DEVMETHOD(cryptodev_freesession,sec_freesession), DEVMETHOD(cryptodev_process, sec_process), DEVMETHOD_END }; static driver_t sec_driver = { "sec", sec_methods, sizeof(struct sec_softc), }; static devclass_t sec_devclass; DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0); MODULE_DEPEND(sec, crypto, 1, 1, 1); static struct sec_eu_methods sec_eus[] = { { sec_aesu_newsession, sec_aesu_make_desc, }, { sec_deu_newsession, sec_deu_make_desc, }, { sec_mdeu_newsession, sec_mdeu_make_desc, }, { NULL, NULL } }; static inline void sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op) { /* Sync only if dma memory is valid */ if (dma_mem->dma_vaddr != NULL) bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op); } -static inline void -sec_free_session(struct sec_softc *sc, struct sec_session *ses) -{ - - SEC_LOCK(sc, sessions); - ses->ss_used = 0; - SEC_UNLOCK(sc, sessions); -} - static inline void * sec_get_pointer_data(struct sec_desc *desc, u_int n) { return (desc->sd_ptr_dmem[n].dma_vaddr); } static int sec_probe(device_t dev) { struct sec_softc *sc; uint64_t id; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "fsl,sec2.0")) return (ENXIO); sc = device_get_softc(dev); sc->sc_rrid = 0; sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); id = SEC_READ(sc, SEC_ID); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres); switch (id) { case SEC_20_ID: device_set_desc(dev, "Freescale Security Engine 2.0"); sc->sc_version = 2; break; case SEC_30_ID: device_set_desc(dev, "Freescale Security Engine 3.0"); sc->sc_version = 3; break; case SEC_31_ID: device_set_desc(dev, "Freescale Security Engine 3.1"); sc->sc_version = 3; break; default: device_printf(dev, "unknown SEC ID 0x%016"PRIx64"!\n", id); return (ENXIO); } return (0); } static int sec_attach(device_t dev) { struct sec_softc *sc; struct sec_hw_lt *lt; int error = 0; int i; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_blocked = 0; sc->sc_shutdown = 0; - sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); + sc->sc_cid = crypto_get_driverid(dev, sizeof(struct sec_session), + CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver ID!\n"); return (ENXIO); } /* Init locks */ mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev), "SEC Controller lock", MTX_DEF); mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev), "SEC Descriptors lock", MTX_DEF); - mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev), - "SEC Sessions lock", MTX_DEF); /* Allocate I/O memory for SEC registers */ sc->sc_rrid = 0; sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) { device_printf(dev, "could not allocate I/O memory!\n"); goto fail1; } sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); sc->sc_bas.bst = 
rman_get_bustag(sc->sc_rres); /* Setup interrupts */ sc->sc_pri_irid = 0; error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand, &sc->sc_pri_irid, sec_primary_intr, "primary"); if (error) goto fail2; if (sc->sc_version == 3) { sc->sc_sec_irid = 1; error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand, &sc->sc_sec_irid, sec_secondary_intr, "secondary"); if (error) goto fail3; } /* Alloc DMA memory for descriptors and link tables */ error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem), SEC_DESCRIPTORS * sizeof(struct sec_hw_desc)); if (error) goto fail4; error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem), (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt)); if (error) goto fail5; /* Fill in descriptors and link tables */ for (i = 0; i < SEC_DESCRIPTORS; i++) { sc->sc_desc[i].sd_desc = (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i; sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr + (i * sizeof(struct sec_hw_desc)); } for (i = 0; i < SEC_LT_ENTRIES + 1; i++) { sc->sc_lt[i].sl_lt = (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i; sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr + (i * sizeof(struct sec_hw_lt)); } /* Last entry in link table is used to create a circle */ lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt; lt->shl_length = 0; lt->shl_r = 0; lt->shl_n = 1; lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr; /* Init descriptor and link table queues pointers */ SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES); SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES); /* Create masks for fast checks */ sc->sc_int_error_mask = 0; for (i = 0; i < SEC_CHANNELS; i++) sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i)); switch (sc->sc_version) { case 2: sc->sc_channel_idle_mask = (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) | (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) | (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) | (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S); break; case 3: sc->sc_channel_idle_mask = (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) | (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) | (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) | (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S); break; } /* Init hardware */ error = sec_init(sc); if (error) goto fail6; /* Register in OCF (AESU) */ crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); /* Register in OCF (DEU) */ crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); /* Register in OCF (MDEU) */ crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0); crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0); if (sc->sc_version >= 3) { crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0); } return (0); fail6: sec_free_dma_mem(&(sc->sc_lt_dmem)); fail5: sec_free_dma_mem(&(sc->sc_desc_dmem)); fail4: sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand, sc->sc_sec_irid, "secondary"); fail3: sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand, sc->sc_pri_irid, "primary"); fail2: bus_release_resource(dev, 
SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres); fail1: mtx_destroy(&sc->sc_controller_lock); mtx_destroy(&sc->sc_descriptors_lock); - mtx_destroy(&sc->sc_sessions_lock); return (ENXIO); } static int sec_detach(device_t dev) { struct sec_softc *sc = device_get_softc(dev); int i, error, timeout = SEC_TIMEOUT; /* Prepare driver to shutdown */ SEC_LOCK(sc, descriptors); sc->sc_shutdown = 1; SEC_UNLOCK(sc, descriptors); /* Wait until all queued processing finishes */ while (1) { SEC_LOCK(sc, descriptors); i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc); SEC_UNLOCK(sc, descriptors); if (i == 0) break; if (timeout < 0) { device_printf(dev, "queue flush timeout!\n"); /* DMA can be still active - stop it */ for (i = 0; i < SEC_CHANNELS; i++) sec_channel_reset(sc, i, 1); break; } timeout -= 1000; DELAY(1000); } /* Disable interrupts */ SEC_WRITE(sc, SEC_IER, 0); /* Unregister from OCF */ crypto_unregister_all(sc->sc_cid); /* Free DMA memory */ for (i = 0; i < SEC_DESCRIPTORS; i++) SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i])); sec_free_dma_mem(&(sc->sc_lt_dmem)); sec_free_dma_mem(&(sc->sc_desc_dmem)); /* Release interrupts */ sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand, sc->sc_pri_irid, "primary"); sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand, sc->sc_sec_irid, "secondary"); /* Release memory */ if (sc->sc_rres) { error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres); if (error) device_printf(dev, "bus_release_resource() failed for" " I/O memory, error %d\n", error); sc->sc_rres = NULL; } mtx_destroy(&sc->sc_controller_lock); mtx_destroy(&sc->sc_descriptors_lock); - mtx_destroy(&sc->sc_sessions_lock); return (0); } static int sec_suspend(device_t dev) { return (0); } static int sec_resume(device_t dev) { return (0); } static int sec_shutdown(device_t dev) { return (0); } static int sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand, int *irid, driver_intr_t handler, const char *iname) { int error; (*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid, RF_ACTIVE); if ((*ires) == NULL) { device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname); return (ENXIO); } error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET, NULL, handler, sc, ihand); if (error) { device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname); if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires)) device_printf(sc->sc_dev, "could not release %s IRQ\n", iname); (*ires) = NULL; return (error); } return (0); } static void sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand, int irid, const char *iname) { int error; if (ires == NULL) return; error = bus_teardown_intr(sc->sc_dev, ires, ihand); if (error) device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s" " IRQ, error %d\n", iname, error); error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires); if (error) device_printf(sc->sc_dev, "bus_release_resource() failed for %s" " IRQ, error %d\n", iname, error); } static void sec_primary_intr(void *arg) { struct sec_softc *sc = arg; struct sec_desc *desc; uint64_t isr; int i, wakeup = 0; SEC_LOCK(sc, controller); /* Check for errors */ isr = SEC_READ(sc, SEC_ISR); if (isr & sc->sc_int_error_mask) { /* Check each channel for error */ for (i = 0; i < SEC_CHANNELS; i++) { if ((isr & SEC_INT_CH_ERR(i)) == 0) continue; device_printf(sc->sc_dev, "I/O error on channel %i!\n", i); /* Find and mark problematic descriptor */ desc = sec_find_desc(sc, SEC_READ(sc, SEC_CHAN_CDPR(i))); if (desc != NULL) 
desc->sd_error = EIO; /* Do partial channel reset */ sec_channel_reset(sc, i, 0); } } /* ACK interrupt */ SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL); SEC_UNLOCK(sc, controller); SEC_LOCK(sc, descriptors); /* Handle processed descriptors */ SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); while (SEC_QUEUED_DESC_CNT(sc) > 0) { desc = SEC_GET_QUEUED_DESC(sc); if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) { SEC_PUT_BACK_QUEUED_DESC(sc); break; } SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); desc->sd_crp->crp_etype = desc->sd_error; crypto_done(desc->sd_crp); SEC_DESC_FREE_POINTERS(desc); SEC_DESC_FREE_LT(sc, desc); SEC_DESC_QUEUED2FREE(sc); } SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (!sc->sc_shutdown) { wakeup = sc->sc_blocked; sc->sc_blocked = 0; } SEC_UNLOCK(sc, descriptors); /* Enqueue ready descriptors in hardware */ sec_enqueue(sc); if (wakeup) crypto_unblock(sc->sc_cid, wakeup); } static void sec_secondary_intr(void *arg) { struct sec_softc *sc = arg; device_printf(sc->sc_dev, "spurious secondary interrupt!\n"); sec_primary_intr(arg); } static int sec_controller_reset(struct sec_softc *sc) { int timeout = SEC_TIMEOUT; /* Reset Controller */ SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR); while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) { DELAY(1000); timeout -= 1000; if (timeout < 0) { device_printf(sc->sc_dev, "timeout while waiting for " "device reset!\n"); return (ETIMEDOUT); } } return (0); } static int sec_channel_reset(struct sec_softc *sc, int channel, int full) { int timeout = SEC_TIMEOUT; uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON; uint64_t reg; /* Reset Channel */ reg = SEC_READ(sc, SEC_CHAN_CCR(channel)); SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit); while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) { DELAY(1000); timeout -= 1000; if (timeout < 0) { device_printf(sc->sc_dev, "timeout while waiting for " "channel reset!\n"); return (ETIMEDOUT); } } if (full) { reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS; switch(sc->sc_version) { case 2: reg |= SEC_CHAN_CCR_CDWE; break; case 3: reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN; break; } SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg); } return (0); } static int sec_init(struct sec_softc *sc) { uint64_t reg; int error, i; /* Reset controller twice to clear all pending interrupts */ error = sec_controller_reset(sc); if (error) return (error); error = sec_controller_reset(sc); if (error) return (error); /* Reset channels */ for (i = 0; i < SEC_CHANNELS; i++) { error = sec_channel_reset(sc, i, 1); if (error) return (error); } /* Enable Interrupts */ reg = SEC_INT_ITO; for (i = 0; i < SEC_CHANNELS; i++) reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i); SEC_WRITE(sc, SEC_IER, reg); return (error); } static void sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct sec_dma_mem *dma_mem = arg; if (error) return; KASSERT(nseg == 1, ("Wrong number of segments, should be 1")); dma_mem->dma_paddr = segs->ds_addr; } static void sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct sec_desc_map_info *sdmi = arg; struct sec_softc *sc = sdmi->sdmi_sc; struct sec_lt *lt = NULL; bus_addr_t addr; bus_size_t size; int i; SEC_LOCK_ASSERT(sc, descriptors); if (error) return; for (i = 0; i < nseg; i++) { addr = segs[i].ds_addr; size = segs[i].ds_len; /* Skip requested offset */ if (sdmi->sdmi_offset >= size) { sdmi->sdmi_offset -= size; continue; } addr += sdmi->sdmi_offset; size -= sdmi->sdmi_offset; 
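/*
 * Illustrative sketch (not part of this change): the crypto_done() call in
 * the interrupt path above hands a finished request back to its owner by
 * running the consumer-supplied crp_callback.  A typical consumer callback
 * only inspects crp_etype; by convention a request that failed with EAGAIN
 * is cleared and handed straight back to crypto_dispatch().  my_done is a
 * placeholder name.
 */
#if 0
static int
my_done(struct cryptop *crp)
{

	if (crp->crp_etype == EAGAIN) {
		crp->crp_etype = 0;
		return (crypto_dispatch(crp));
	}
	if (crp->crp_etype != 0) {
		printf("crypto request failed: %d\n", crp->crp_etype);
		return (crp->crp_etype);
	}
	/* Success: crp->crp_buf now holds the transformed data. */
	return (0);
}
#endif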
sdmi->sdmi_offset = 0; /* Do not link more than requested */ if (sdmi->sdmi_size < size) size = sdmi->sdmi_size; lt = SEC_ALLOC_LT_ENTRY(sc); lt->sl_lt->shl_length = size; lt->sl_lt->shl_r = 0; lt->sl_lt->shl_n = 0; lt->sl_lt->shl_ptr = addr; if (sdmi->sdmi_lt_first == NULL) sdmi->sdmi_lt_first = lt; sdmi->sdmi_lt_used += 1; if ((sdmi->sdmi_size -= size) == 0) break; } sdmi->sdmi_lt_last = lt; } static void sec_dma_map_desc_cb2(void *arg, bus_dma_segment_t *segs, int nseg, bus_size_t size, int error) { sec_dma_map_desc_cb(arg, segs, nseg, error); } static int sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem, bus_size_t size) { int error; if (dma_mem->dma_vaddr != NULL) return (EBUSY); error = bus_dma_tag_create(NULL, /* parent */ SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ size, 1, /* maxsize, nsegments */ size, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &(dma_mem->dma_tag)); /* dmat */ if (error) { device_printf(sc->sc_dev, "failed to allocate busdma tag, error" " %i!\n", error); goto err1; } error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr), BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map)); if (error) { device_printf(sc->sc_dev, "failed to allocate DMA safe" " memory, error %i!\n", error); goto err2; } error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map, dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "cannot get address of the DMA" " memory, error %i\n", error); goto err3; } dma_mem->dma_is_map = 0; return (0); err3: bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map); err2: bus_dma_tag_destroy(dma_mem->dma_tag); err1: dma_mem->dma_vaddr = NULL; return(error); } static int sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, void *mem, bus_size_t size, int type, struct sec_desc_map_info *sdmi) { int error; if (dma_mem->dma_vaddr != NULL) return (EBUSY); switch (type) { case SEC_MEMORY: break; case SEC_UIO: size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE; break; case SEC_MBUF: size = m_length((struct mbuf*)mem, NULL); break; default: return (EINVAL); } error = bus_dma_tag_create(NULL, /* parent */ SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ size, /* maxsize */ SEC_FREE_LT_CNT(sc), /* nsegments */ SEC_MAX_DMA_BLOCK_SIZE, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &(dma_mem->dma_tag)); /* dmat */ if (error) { device_printf(sc->sc_dev, "failed to allocate busdma tag, error" " %i!\n", error); dma_mem->dma_vaddr = NULL; return (error); } error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map)); if (error) { device_printf(sc->sc_dev, "failed to create DMA map, error %i!" 
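/*
 * Illustrative sketch (not part of this change): bus_dmamap_load() only
 * reports the bus address through its callback, so the common idiom (used
 * by sec_alloc_dma_mem() above) is a minimal callback that records
 * segs[0].ds_addr in the caller's bookkeeping structure.  The my_* names
 * are placeholders.
 */
#if 0
struct my_dma {
	void		*md_vaddr;
	bus_addr_t	 md_paddr;
};

static void
my_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct my_dma *dma = arg;

	if (error != 0)
		return;
	KASSERT(nseg == 1, ("expected a single DMA segment"));
	dma->md_paddr = segs[0].ds_addr;
}
#endif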
"\n", error); bus_dma_tag_destroy(dma_mem->dma_tag); return (error); } switch (type) { case SEC_MEMORY: error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map, mem, size, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT); break; case SEC_UIO: error = bus_dmamap_load_uio(dma_mem->dma_tag, dma_mem->dma_map, mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT); break; case SEC_MBUF: error = bus_dmamap_load_mbuf(dma_mem->dma_tag, dma_mem->dma_map, mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT); break; } if (error) { device_printf(sc->sc_dev, "cannot get address of the DMA" " memory, error %i!\n", error); bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map); bus_dma_tag_destroy(dma_mem->dma_tag); return (error); } dma_mem->dma_is_map = 1; dma_mem->dma_vaddr = mem; return (0); } static void sec_free_dma_mem(struct sec_dma_mem *dma_mem) { /* Check for double free */ if (dma_mem->dma_vaddr == NULL) return; bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map); if (dma_mem->dma_is_map) bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map); else bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map); bus_dma_tag_destroy(dma_mem->dma_tag); dma_mem->dma_vaddr = NULL; } static int sec_eu_channel(struct sec_softc *sc, int eu) { uint64_t reg; int channel = 0; SEC_LOCK_ASSERT(sc, controller); reg = SEC_READ(sc, SEC_EUASR); switch (eu) { case SEC_EU_AFEU: channel = SEC_EUASR_AFEU(reg); break; case SEC_EU_DEU: channel = SEC_EUASR_DEU(reg); break; case SEC_EU_MDEU_A: case SEC_EU_MDEU_B: channel = SEC_EUASR_MDEU(reg); break; case SEC_EU_RNGU: channel = SEC_EUASR_RNGU(reg); break; case SEC_EU_PKEU: channel = SEC_EUASR_PKEU(reg); break; case SEC_EU_AESU: channel = SEC_EUASR_AESU(reg); break; case SEC_EU_KEU: channel = SEC_EUASR_KEU(reg); break; case SEC_EU_CRCU: channel = SEC_EUASR_CRCU(reg); break; } return (channel - 1); } static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel) { u_int fflvl = SEC_MAX_FIFO_LEVEL; uint64_t reg; int i; SEC_LOCK_ASSERT(sc, controller); /* Find free channel if have not got one */ if (channel < 0) { for (i = 0; i < SEC_CHANNELS; i++) { reg = SEC_READ(sc, SEC_CHAN_CSR(channel)); if ((reg & sc->sc_channel_idle_mask) == 0) { channel = i; break; } } } /* There is no free channel */ if (channel < 0) return (-1); /* Check FIFO level on selected channel */ reg = SEC_READ(sc, SEC_CHAN_CSR(channel)); switch(sc->sc_version) { case 2: fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M; break; case 3: fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M; break; } if (fflvl >= SEC_MAX_FIFO_LEVEL) return (-1); /* Enqueue descriptor in channel */ SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr); return (channel); } static void sec_enqueue(struct sec_softc *sc) { struct sec_desc *desc; int ch0, ch1; SEC_LOCK(sc, descriptors); SEC_LOCK(sc, controller); while (SEC_READY_DESC_CNT(sc) > 0) { desc = SEC_GET_READY_DESC(sc); ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0); ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1); /* * Both EU are used by the same channel. * Enqueue descriptor in channel used by busy EUs. */ if (ch0 >= 0 && ch0 == ch1) { if (sec_enqueue_desc(sc, desc, ch0) >= 0) { SEC_DESC_READY2QUEUED(sc); continue; } } /* * Only one EU is free. * Enqueue descriptor in channel used by busy EU. */ if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) { if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1) >= 0) { SEC_DESC_READY2QUEUED(sc); continue; } } /* * Both EU are free. 
* Enqueue descriptor in first free channel. */ if (ch0 < 0 && ch1 < 0) { if (sec_enqueue_desc(sc, desc, -1) >= 0) { SEC_DESC_READY2QUEUED(sc); continue; } } /* Current descriptor can not be queued at the moment */ SEC_PUT_BACK_READY_DESC(sc); break; } SEC_UNLOCK(sc, controller); SEC_UNLOCK(sc, descriptors); } static struct sec_desc * sec_find_desc(struct sec_softc *sc, bus_addr_t paddr) { struct sec_desc *desc = NULL; int i; SEC_LOCK_ASSERT(sc, descriptors); for (i = 0; i < SEC_CHANNELS; i++) { if (sc->sc_desc[i].sd_desc_paddr == paddr) { desc = &(sc->sc_desc[i]); break; } } return (desc); } static int sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize) { struct sec_hw_desc_ptr *ptr; SEC_LOCK_ASSERT(sc, descriptors); ptr = &(desc->sd_desc->shd_pointer[n]); ptr->shdp_length = dsize; ptr->shdp_extent = 0; ptr->shdp_j = 0; ptr->shdp_ptr = data; return (0); } static int sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc, u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype) { struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 }; struct sec_hw_desc_ptr *ptr; int error; SEC_LOCK_ASSERT(sc, descriptors); /* For flat memory map only requested region */ if (dtype == SEC_MEMORY) { data = (uint8_t*)(data) + doffset; sdmi.sdmi_offset = 0; } error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), data, dsize, dtype, &sdmi); if (error) return (error); sdmi.sdmi_lt_last->sl_lt->shl_r = 1; desc->sd_lt_used += sdmi.sdmi_lt_used; ptr = &(desc->sd_desc->shd_pointer[n]); ptr->shdp_length = dsize; ptr->shdp_extent = 0; ptr->shdp_j = 1; ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr; return (0); } static int sec_split_cri(struct cryptoini *cri, struct cryptoini **enc, struct cryptoini **mac) { struct cryptoini *e, *m; e = cri; m = cri->cri_next; /* We can haldle only two operations */ if (m && m->cri_next) return (EINVAL); if (sec_mdeu_can_handle(e->cri_alg)) { cri = m; m = e; e = cri; } if (m && !sec_mdeu_can_handle(m->cri_alg)) return (EINVAL); *enc = e; *mac = m; return (0); } static int sec_split_crp(struct cryptop *crp, struct cryptodesc **enc, struct cryptodesc **mac) { struct cryptodesc *e, *m, *t; e = crp->crp_desc; m = e->crd_next; /* We can haldle only two operations */ if (m && m->crd_next) return (EINVAL); if (sec_mdeu_can_handle(e->crd_alg)) { t = m; m = e; e = t; } if (m && !sec_mdeu_can_handle(m->crd_alg)) return (EINVAL); *enc = e; *mac = m; return (0); } static int -sec_alloc_session(struct sec_softc *sc) +sec_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri) { - struct sec_session *ses = NULL; - int sid = -1; - u_int i; - - SEC_LOCK(sc, sessions); - - for (i = 0; i < SEC_MAX_SESSIONS; i++) { - if (sc->sc_sessions[i].ss_used == 0) { - ses = &(sc->sc_sessions[i]); - ses->ss_used = 1; - ses->ss_ivlen = 0; - ses->ss_klen = 0; - ses->ss_mklen = 0; - sid = i; - break; - } - } - - SEC_UNLOCK(sc, sessions); - - return (sid); -} - -static struct sec_session * -sec_get_session(struct sec_softc *sc, u_int sid) -{ - struct sec_session *ses; - - if (sid >= SEC_MAX_SESSIONS) - return (NULL); - - SEC_LOCK(sc, sessions); - - ses = &(sc->sc_sessions[sid]); - - if (ses->ss_used == 0) - ses = NULL; - - SEC_UNLOCK(sc, sessions); - - return (ses); -} - -static int -sec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri) -{ struct sec_softc *sc = device_get_softc(dev); struct sec_eu_methods *eu = sec_eus; struct cryptoini *enc = NULL; struct cryptoini *mac = NULL; struct 
sec_session *ses; int error = -1; - int sid; error = sec_split_cri(cri, &enc, &mac); if (error) return (error); /* Check key lengths */ if (enc && enc->cri_key && (enc->cri_klen / 8) > SEC_MAX_KEY_LEN) return (E2BIG); if (mac && mac->cri_key && (mac->cri_klen / 8) > SEC_MAX_KEY_LEN) return (E2BIG); /* Only SEC 3.0 supports digests larger than 256 bits */ if (sc->sc_version < 3 && mac && mac->cri_klen > 256) return (E2BIG); - sid = sec_alloc_session(sc); - if (sid < 0) - return (ENOMEM); + ses = crypto_get_driver_session(cses); - ses = sec_get_session(sc, sid); - /* Find EU for this session */ while (eu->sem_make_desc != NULL) { error = eu->sem_newsession(sc, ses, enc, mac); if (error >= 0) break; eu++; } /* If not found, return EINVAL */ - if (error < 0) { - sec_free_session(sc, ses); + if (error < 0) return (EINVAL); - } /* Save cipher key */ if (enc && enc->cri_key) { ses->ss_klen = enc->cri_klen / 8; memcpy(ses->ss_key, enc->cri_key, ses->ss_klen); } /* Save digest key */ if (mac && mac->cri_key) { ses->ss_mklen = mac->cri_klen / 8; memcpy(ses->ss_mkey, mac->cri_key, ses->ss_mklen); } ses->ss_eu = eu; - *sidp = sid; - return (0); } static int -sec_freesession(device_t dev, uint64_t tid) -{ - struct sec_softc *sc = device_get_softc(dev); - struct sec_session *ses; - int error = 0; - - ses = sec_get_session(sc, CRYPTO_SESID2LID(tid)); - if (ses == NULL) - return (EINVAL); - - sec_free_session(sc, ses); - - return (error); -} - -static int sec_process(device_t dev, struct cryptop *crp, int hint) { struct sec_softc *sc = device_get_softc(dev); struct sec_desc *desc = NULL; struct cryptodesc *mac, *enc; struct sec_session *ses; int buftype, error = 0; - /* Check Session ID */ - ses = sec_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid)); - if (ses == NULL) { - crp->crp_etype = EINVAL; - crypto_done(crp); - return (0); - } + ses = crypto_get_driver_session(crp->crp_session); /* Check for input length */ if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) { crp->crp_etype = E2BIG; crypto_done(crp); return (0); } /* Get descriptors */ if (sec_split_crp(crp, &enc, &mac)) { crp->crp_etype = EINVAL; crypto_done(crp); return (0); } SEC_LOCK(sc, descriptors); SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Block driver if there is no free descriptors or we are going down */ if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) { sc->sc_blocked |= CRYPTO_SYMQ; SEC_UNLOCK(sc, descriptors); return (ERESTART); } /* Prepare descriptor */ desc = SEC_GET_FREE_DESC(sc); desc->sd_lt_used = 0; desc->sd_error = 0; desc->sd_crp = crp; if (crp->crp_flags & CRYPTO_F_IOV) buftype = SEC_UIO; else if (crp->crp_flags & CRYPTO_F_IMBUF) buftype = SEC_MBUF; else buftype = SEC_MEMORY; if (enc && enc->crd_flags & CRD_F_ENCRYPT) { if (enc->crd_flags & CRD_F_IV_EXPLICIT) memcpy(desc->sd_desc->shd_iv, enc->crd_iv, ses->ss_ivlen); else arc4rand(desc->sd_desc->shd_iv, ses->ss_ivlen, 0); if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0) crypto_copyback(crp->crp_flags, crp->crp_buf, enc->crd_inject, ses->ss_ivlen, desc->sd_desc->shd_iv); } else if (enc) { if (enc->crd_flags & CRD_F_IV_EXPLICIT) memcpy(desc->sd_desc->shd_iv, enc->crd_iv, ses->ss_ivlen); else crypto_copydata(crp->crp_flags, crp->crp_buf, enc->crd_inject, ses->ss_ivlen, desc->sd_desc->shd_iv); } if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) { if ((enc->crd_klen / 8) <= SEC_MAX_KEY_LEN) { ses->ss_klen = enc->crd_klen / 8; memcpy(ses->ss_key, enc->crd_key, ses->ss_klen); } else error = E2BIG; } if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) { if 
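/*
 * Illustrative sketch (not part of this change): on the request path the
 * opaque handle travels in crp->crp_session, so a process method recovers
 * its per-session state with a single crypto_get_driver_session() call;
 * the old driver-side session table, "used" flag and sessions lock are no
 * longer needed.  The my_* names are placeholders.
 */
#if 0
static int
my_process(device_t dev, struct cryptop *crp, int hint)
{
	struct my_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);
	/* ...build and enqueue a hardware descriptor from ses and crp... */
	crp->crp_etype = 0;
	crypto_done(crp);
	return (0);
}
#endif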
((mac->crd_klen / 8) <= SEC_MAX_KEY_LEN) { ses->ss_mklen = mac->crd_klen / 8; memcpy(ses->ss_mkey, mac->crd_key, ses->ss_mklen); } else error = E2BIG; } if (!error) { memcpy(desc->sd_desc->shd_key, ses->ss_key, ses->ss_klen); memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, ses->ss_mklen); error = ses->ss_eu->sem_make_desc(sc, ses, desc, crp, buftype); } if (error) { SEC_DESC_FREE_POINTERS(desc); SEC_DESC_PUT_BACK_LT(sc, desc); SEC_PUT_BACK_FREE_DESC(sc); SEC_UNLOCK(sc, descriptors); crp->crp_etype = error; crypto_done(crp); return (0); } /* * Skip DONE interrupt if this is not last request in burst, but only * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE * signaling on each descriptor. */ if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3) desc->sd_desc->shd_dn = 0; else desc->sd_desc->shd_dn = 1; SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); SEC_DESC_FREE2READY(sc); SEC_UNLOCK(sc, descriptors); /* Enqueue ready descriptors in hardware */ sec_enqueue(sc); return (0); } static int sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc, int buftype) { struct sec_hw_desc *hd = desc->sd_desc; int error; hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP; hd->shd_eu_sel1 = SEC_EU_NONE; hd->shd_mode1 = 0; /* Pointer 0: NULL */ error = sec_make_pointer_direct(sc, desc, 0, 0, 0); if (error) return (error); /* Pointer 1: IV IN */ error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen); if (error) return (error); /* Pointer 2: Cipher Key */ error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_key), ses->ss_klen); if (error) return (error); /* Pointer 3: Data IN */ error = sec_make_pointer(sc, desc, 3, crp->crp_buf, enc->crd_skip, enc->crd_len, buftype); if (error) return (error); /* Pointer 4: Data OUT */ error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip, enc->crd_len, buftype); if (error) return (error); /* Pointer 5: IV OUT (Not used: NULL) */ error = sec_make_pointer_direct(sc, desc, 5, 0, 0); if (error) return (error); /* Pointer 6: NULL */ error = sec_make_pointer_direct(sc, desc, 6, 0, 0); return (error); } static int sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc, struct cryptodesc *mac, int buftype) { struct sec_hw_desc *hd = desc->sd_desc; u_int eu, mode, hashlen; int error; if (mac->crd_len < enc->crd_len) return (EINVAL); if (mac->crd_skip + mac->crd_len != enc->crd_skip + enc->crd_len) return (EINVAL); error = sec_mdeu_config(mac, &eu, &mode, &hashlen); if (error) return (error); hd->shd_desc_type = SEC_DT_HMAC_SNOOP; hd->shd_eu_sel1 = eu; hd->shd_mode1 = mode; /* Pointer 0: HMAC Key */ error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen); if (error) return (error); /* Pointer 1: HMAC-Only Data IN */ error = sec_make_pointer(sc, desc, 1, crp->crp_buf, mac->crd_skip, mac->crd_len - enc->crd_len, buftype); if (error) return (error); /* Pointer 2: Cipher Key */ error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_key), ses->ss_klen); if (error) return (error); /* Pointer 3: IV IN */ error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, 
shd_iv), ses->ss_ivlen); if (error) return (error); /* Pointer 4: Data IN */ error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip, enc->crd_len, buftype); if (error) return (error); /* Pointer 5: Data OUT */ error = sec_make_pointer(sc, desc, 5, crp->crp_buf, enc->crd_skip, enc->crd_len, buftype); if (error) return (error); /* Pointer 6: HMAC OUT */ error = sec_make_pointer(sc, desc, 6, crp->crp_buf, mac->crd_inject, hashlen, buftype); return (error); } /* AESU */ static int sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac) { if (enc == NULL) return (-1); if (enc->cri_alg != CRYPTO_AES_CBC) return (-1); ses->ss_ivlen = AES_BLOCK_LEN; return (0); } static int sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, int buftype) { struct sec_hw_desc *hd = desc->sd_desc; struct cryptodesc *enc, *mac; int error; error = sec_split_crp(crp, &enc, &mac); if (error) return (error); if (!enc) return (EINVAL); hd->shd_eu_sel0 = SEC_EU_AESU; hd->shd_mode0 = SEC_AESU_MODE_CBC; if (enc->crd_alg != CRYPTO_AES_CBC) return (EINVAL); if (enc->crd_flags & CRD_F_ENCRYPT) { hd->shd_mode0 |= SEC_AESU_MODE_ED; hd->shd_dir = 0; } else hd->shd_dir = 1; if (mac) error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac, buftype); else error = sec_build_common_ns_desc(sc, desc, ses, crp, enc, buftype); return (error); } /* DEU */ static int sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac) { if (enc == NULL) return (-1); switch (enc->cri_alg) { case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: break; default: return (-1); } ses->ss_ivlen = DES_BLOCK_LEN; return (0); } static int sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, int buftype) { struct sec_hw_desc *hd = desc->sd_desc; struct cryptodesc *enc, *mac; int error; error = sec_split_crp(crp, &enc, &mac); if (error) return (error); if (!enc) return (EINVAL); hd->shd_eu_sel0 = SEC_EU_DEU; hd->shd_mode0 = SEC_DEU_MODE_CBC; switch (enc->crd_alg) { case CRYPTO_3DES_CBC: hd->shd_mode0 |= SEC_DEU_MODE_TS; break; case CRYPTO_DES_CBC: break; default: return (EINVAL); } if (enc->crd_flags & CRD_F_ENCRYPT) { hd->shd_mode0 |= SEC_DEU_MODE_ED; hd->shd_dir = 0; } else hd->shd_dir = 1; if (mac) error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac, buftype); else error = sec_build_common_ns_desc(sc, desc, ses, crp, enc, buftype); return (error); } /* MDEU */ static int sec_mdeu_can_handle(u_int alg) { switch (alg) { case CRYPTO_MD5: case CRYPTO_SHA1: case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: return (1); default: return (0); } } static int sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen) { *mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT; *eu = SEC_EU_NONE; switch (crd->crd_alg) { case CRYPTO_MD5_HMAC: *mode |= SEC_MDEU_MODE_HMAC; /* FALLTHROUGH */ case CRYPTO_MD5: *eu = SEC_EU_MDEU_A; *mode |= SEC_MDEU_MODE_MD5; *hashlen = MD5_HASH_LEN; break; case CRYPTO_SHA1_HMAC: *mode |= SEC_MDEU_MODE_HMAC; /* FALLTHROUGH */ case CRYPTO_SHA1: *eu = SEC_EU_MDEU_A; *mode |= SEC_MDEU_MODE_SHA1; *hashlen = SHA1_HASH_LEN; break; case CRYPTO_SHA2_256_HMAC: *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256; *eu = SEC_EU_MDEU_A; break; case CRYPTO_SHA2_384_HMAC: *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384; *eu = SEC_EU_MDEU_B; 
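/*
 * Illustrative sketch (not part of this change): the AESU/DEU/MDEU session
 * hooks above accept a request made of at most one cipher and one MAC
 * transform linked through cri_next (see sec_split_cri()).  A consumer
 * asking for AES-CBC plus HMAC-SHA1 could chain the two descriptors as
 * below; key lengths are given in bits.  The my_* names and key buffers
 * are placeholders.
 */
#if 0
static int
my_open_session(crypto_session_t *csesp, caddr_t aes_key, caddr_t mac_key)
{
	struct cryptoini crie, cria;

	memset(&crie, 0, sizeof(crie));
	crie.cri_alg = CRYPTO_AES_CBC;
	crie.cri_klen = 128;			/* bits */
	crie.cri_key = aes_key;

	memset(&cria, 0, sizeof(cria));
	cria.cri_alg = CRYPTO_SHA1_HMAC;
	cria.cri_klen = 160;			/* bits */
	cria.cri_key = mac_key;

	crie.cri_next = &cria;
	return (crypto_newsession(csesp, &crie, CRYPTOCAP_F_HARDWARE));
}
#endif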
break; case CRYPTO_SHA2_512_HMAC: *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512; *eu = SEC_EU_MDEU_B; break; default: return (EINVAL); } if (*mode & SEC_MDEU_MODE_HMAC) *hashlen = SEC_HMAC_HASH_LEN; return (0); } static int sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac) { if (mac && sec_mdeu_can_handle(mac->cri_alg)) return (0); return (-1); } static int sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, int buftype) { struct cryptodesc *enc, *mac; struct sec_hw_desc *hd = desc->sd_desc; u_int eu, mode, hashlen; int error; error = sec_split_crp(crp, &enc, &mac); if (error) return (error); if (enc) return (EINVAL); error = sec_mdeu_config(mac, &eu, &mode, &hashlen); if (error) return (error); hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP; hd->shd_eu_sel0 = eu; hd->shd_mode0 = mode; hd->shd_eu_sel1 = SEC_EU_NONE; hd->shd_mode1 = 0; /* Pointer 0: NULL */ error = sec_make_pointer_direct(sc, desc, 0, 0, 0); if (error) return (error); /* Pointer 1: Context In (Not used: NULL) */ error = sec_make_pointer_direct(sc, desc, 1, 0, 0); if (error) return (error); /* Pointer 2: HMAC Key (or NULL, depending on digest type) */ if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC) error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen); else error = sec_make_pointer_direct(sc, desc, 2, 0, 0); if (error) return (error); /* Pointer 3: Input Data */ error = sec_make_pointer(sc, desc, 3, crp->crp_buf, mac->crd_skip, mac->crd_len, buftype); if (error) return (error); /* Pointer 4: NULL */ error = sec_make_pointer_direct(sc, desc, 4, 0, 0); if (error) return (error); /* Pointer 5: Hash out */ error = sec_make_pointer(sc, desc, 5, crp->crp_buf, mac->crd_inject, hashlen, buftype); if (error) return (error); /* Pointer 6: NULL */ error = sec_make_pointer_direct(sc, desc, 6, 0, 0); return (0); } Index: head/sys/dev/sec/sec.h =================================================================== --- head/sys/dev/sec/sec.h (revision 336438) +++ head/sys/dev/sec/sec.h (revision 336439) @@ -1,429 +1,425 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _SEC_H #define _SEC_H /* * Each SEC channel can hold up to 24 descriptors. All 4 channels can be * simultaneously active holding 96 descriptors. Each descriptor can use 0 or * more link table entries depending of size and granulation of input/output * data. One link table entry is needed for each 65535 bytes of data. */ /* Driver settings */ #define SEC_TIMEOUT 100000 #define SEC_MAX_SESSIONS 256 #define SEC_DESCRIPTORS 256 /* Must be power of 2 */ #define SEC_LT_ENTRIES 1024 /* Must be power of 2 */ #define SEC_MAX_IV_LEN 16 #define SEC_MAX_KEY_LEN 64 /* SEC information */ #define SEC_20_ID 0x0000000000000040ULL #define SEC_30_ID 0x0030030000000000ULL #define SEC_31_ID 0x0030030100000000ULL #define SEC_CHANNELS 4 #define SEC_POINTERS 7 #define SEC_MAX_DMA_BLOCK_SIZE 0xFFFF #define SEC_MAX_FIFO_LEVEL 24 #define SEC_DMA_ALIGNMENT 8 #define __packed__ __attribute__ ((__packed__)) struct sec_softc; struct sec_session; /* SEC descriptor definition */ struct sec_hw_desc_ptr { u_int shdp_length : 16; u_int shdp_j : 1; u_int shdp_extent : 7; u_int __padding0 : 4; uint64_t shdp_ptr : 36; } __packed__; struct sec_hw_desc { union __packed__ { struct __packed__ { u_int eu_sel0 : 4; u_int mode0 : 8; u_int eu_sel1 : 4; u_int mode1 : 8; u_int desc_type : 5; u_int __padding0 : 1; u_int dir : 1; u_int dn : 1; u_int __padding1 : 32; } request; struct __packed__ { u_int done : 8; u_int __padding0 : 27; u_int iccr0 : 2; u_int __padding1 : 6; u_int iccr1 : 2; u_int __padding2 : 19; } feedback; } shd_control; struct sec_hw_desc_ptr shd_pointer[SEC_POINTERS]; /* Data below is mapped to descriptor pointers */ uint8_t shd_iv[SEC_MAX_IV_LEN]; uint8_t shd_key[SEC_MAX_KEY_LEN]; uint8_t shd_mkey[SEC_MAX_KEY_LEN]; } __packed__; #define shd_eu_sel0 shd_control.request.eu_sel0 #define shd_mode0 shd_control.request.mode0 #define shd_eu_sel1 shd_control.request.eu_sel1 #define shd_mode1 shd_control.request.mode1 #define shd_desc_type shd_control.request.desc_type #define shd_dir shd_control.request.dir #define shd_dn shd_control.request.dn #define shd_done shd_control.feedback.done #define shd_iccr0 shd_control.feedback.iccr0 #define shd_iccr1 shd_control.feedback.iccr1 /* SEC link table entries definition */ struct sec_hw_lt { u_int shl_length : 16; u_int __padding0 : 6; u_int shl_r : 1; u_int shl_n : 1; u_int __padding1 : 4; uint64_t shl_ptr : 36; } __packed__; struct sec_dma_mem { void *dma_vaddr; bus_addr_t dma_paddr; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; u_int dma_is_map; }; struct sec_desc { struct sec_hw_desc *sd_desc; bus_addr_t sd_desc_paddr; struct sec_dma_mem sd_ptr_dmem[SEC_POINTERS]; struct cryptop *sd_crp; u_int sd_lt_used; u_int sd_error; }; struct sec_lt { struct sec_hw_lt *sl_lt; bus_addr_t sl_lt_paddr; }; struct sec_eu_methods { int (*sem_newsession)(struct sec_softc *sc, struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac); int (*sem_make_desc)(struct sec_softc *sc, struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp, int buftype); }; struct sec_session { - u_int ss_used; struct sec_eu_methods *ss_eu; uint8_t ss_key[SEC_MAX_KEY_LEN]; uint8_t ss_mkey[SEC_MAX_KEY_LEN]; u_int ss_klen; u_int ss_mklen; u_int ss_ivlen; }; struct sec_desc_map_info { struct sec_softc *sdmi_sc; bus_size_t sdmi_size; bus_size_t sdmi_offset; struct sec_lt *sdmi_lt_first; struct sec_lt *sdmi_lt_last; u_int sdmi_lt_used; }; struct sec_softc { device_t sc_dev; int32_t sc_cid; int sc_blocked; int sc_shutdown; u_int sc_version; uint64_t sc_int_error_mask; uint64_t 
sc_channel_idle_mask; - struct sec_session sc_sessions[SEC_MAX_SESSIONS]; - struct mtx sc_controller_lock; struct mtx sc_descriptors_lock; - struct mtx sc_sessions_lock; struct sec_desc sc_desc[SEC_DESCRIPTORS]; u_int sc_free_desc_get_cnt; u_int sc_free_desc_put_cnt; u_int sc_ready_desc_get_cnt; u_int sc_ready_desc_put_cnt; u_int sc_queued_desc_get_cnt; u_int sc_queued_desc_put_cnt; struct sec_lt sc_lt[SEC_LT_ENTRIES + 1]; u_int sc_lt_alloc_cnt; u_int sc_lt_free_cnt; struct sec_dma_mem sc_desc_dmem; /* descriptors DMA memory */ struct sec_dma_mem sc_lt_dmem; /* link tables DMA memory */ struct resource *sc_rres; /* register resource */ int sc_rrid; /* register rid */ struct { bus_space_tag_t bst; bus_space_handle_t bsh; } sc_bas; struct resource *sc_pri_ires; /* primary irq resource */ void *sc_pri_ihand; /* primary irq handler */ int sc_pri_irid; /* primary irq resource id */ struct resource *sc_sec_ires; /* secondary irq resource */ void *sc_sec_ihand; /* secondary irq handler */ int sc_sec_irid; /* secondary irq resource id */ }; /* Locking macros */ #define SEC_LOCK(sc, what) \ mtx_lock(&(sc)->sc_ ## what ## _lock) #define SEC_UNLOCK(sc, what) \ mtx_unlock(&(sc)->sc_ ## what ## _lock) #define SEC_LOCK_ASSERT(sc, what) \ mtx_assert(&(sc)->sc_ ## what ## _lock, MA_OWNED) /* Read/Write definitions */ #define SEC_READ(sc, reg) \ bus_space_read_8((sc)->sc_bas.bst, (sc)->sc_bas.bsh, (reg)) #define SEC_WRITE(sc, reg, val) \ bus_space_write_8((sc)->sc_bas.bst, (sc)->sc_bas.bsh, (reg), (val)) /* Base allocation macros (warning: wrap must be 2^n) */ #define SEC_CNT_INIT(sc, cnt, wrap) \ (((sc)->cnt) = ((wrap) - 1)) #define SEC_ADD(sc, cnt, wrap, val) \ ((sc)->cnt = (((sc)->cnt) + (val)) & ((wrap) - 1)) #define SEC_INC(sc, cnt, wrap) \ SEC_ADD(sc, cnt, wrap, 1) #define SEC_DEC(sc, cnt, wrap) \ SEC_ADD(sc, cnt, wrap, -1) #define SEC_GET_GENERIC(sc, tab, cnt, wrap) \ ((sc)->tab[SEC_INC(sc, cnt, wrap)]) #define SEC_PUT_GENERIC(sc, tab, cnt, wrap, val) \ ((sc)->tab[SEC_INC(sc, cnt, wrap)] = val) /* Interface for descriptors */ #define SEC_GET_FREE_DESC(sc) \ &SEC_GET_GENERIC(sc, sc_desc, sc_free_desc_get_cnt, SEC_DESCRIPTORS) #define SEC_PUT_BACK_FREE_DESC(sc) \ SEC_DEC(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS) #define SEC_DESC_FREE2READY(sc) \ SEC_INC(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS) #define SEC_GET_READY_DESC(sc) \ &SEC_GET_GENERIC(sc, sc_desc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS) #define SEC_PUT_BACK_READY_DESC(sc) \ SEC_DEC(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS) #define SEC_DESC_READY2QUEUED(sc) \ SEC_INC(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS) #define SEC_GET_QUEUED_DESC(sc) \ &SEC_GET_GENERIC(sc, sc_desc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS) #define SEC_PUT_BACK_QUEUED_DESC(sc) \ SEC_DEC(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS) #define SEC_DESC_QUEUED2FREE(sc) \ SEC_INC(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS) #define SEC_FREE_DESC_CNT(sc) \ (((sc)->sc_free_desc_put_cnt - (sc)->sc_free_desc_get_cnt - 1) \ & (SEC_DESCRIPTORS - 1)) #define SEC_READY_DESC_CNT(sc) \ (((sc)->sc_ready_desc_put_cnt - (sc)->sc_ready_desc_get_cnt) & \ (SEC_DESCRIPTORS - 1)) #define SEC_QUEUED_DESC_CNT(sc) \ (((sc)->sc_queued_desc_put_cnt - (sc)->sc_queued_desc_get_cnt) \ & (SEC_DESCRIPTORS - 1)) #define SEC_DESC_SYNC(sc, mode) do { \ sec_sync_dma_mem(&((sc)->sc_desc_dmem), (mode)); \ sec_sync_dma_mem(&((sc)->sc_lt_dmem), (mode)); \ } while (0) #define SEC_DESC_SYNC_POINTERS(desc, mode) do { \ u_int i; \ for (i = 0; i < SEC_POINTERS; i++) \ sec_sync_dma_mem(&((desc)->sd_ptr_dmem[i]), 
(mode)); \ } while (0) #define SEC_DESC_FREE_POINTERS(desc) do { \ u_int i; \ for (i = 0; i < SEC_POINTERS; i++) \ sec_free_dma_mem(&(desc)->sd_ptr_dmem[i]); \ } while (0); #define SEC_DESC_PUT_BACK_LT(sc, desc) \ SEC_PUT_BACK_LT(sc, (desc)->sd_lt_used) #define SEC_DESC_FREE_LT(sc, desc) \ SEC_FREE_LT(sc, (desc)->sd_lt_used) /* Interface for link tables */ #define SEC_ALLOC_LT_ENTRY(sc) \ &SEC_GET_GENERIC(sc, sc_lt, sc_lt_alloc_cnt, SEC_LT_ENTRIES) #define SEC_PUT_BACK_LT(sc, num) \ SEC_ADD(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES, -(num)) #define SEC_FREE_LT(sc, num) \ SEC_ADD(sc, sc_lt_free_cnt, SEC_LT_ENTRIES, num) #define SEC_FREE_LT_CNT(sc) \ (((sc)->sc_lt_free_cnt - (sc)->sc_lt_alloc_cnt - 1) \ & (SEC_LT_ENTRIES - 1)) /* DMA Maping defines */ #define SEC_MEMORY 0 #define SEC_UIO 1 #define SEC_MBUF 2 /* Size of SEC registers area */ #define SEC_IO_SIZE 0x10000 /* SEC Controller registers */ #define SEC_IER 0x1008 #define SEC_INT_CH_DN(n) (1ULL << (((n) * 2) + 32)) #define SEC_INT_CH_ERR(n) (1ULL << (((n) * 2) + 33)) #define SEC_INT_ITO (1ULL << 55) #define SEC_ISR 0x1010 #define SEC_ICR 0x1018 #define SEC_ID 0x1020 #define SEC_EUASR 0x1028 #define SEC_EUASR_RNGU(r) (((r) >> 0) & 0xF) #define SEC_EUASR_PKEU(r) (((r) >> 8) & 0xF) #define SEC_EUASR_KEU(r) (((r) >> 16) & 0xF) #define SEC_EUASR_CRCU(r) (((r) >> 20) & 0xF) #define SEC_EUASR_DEU(r) (((r) >> 32) & 0xF) #define SEC_EUASR_AESU(r) (((r) >> 40) & 0xF) #define SEC_EUASR_MDEU(r) (((r) >> 48) & 0xF) #define SEC_EUASR_AFEU(r) (((r) >> 56) & 0xF) #define SEC_MCR 0x1030 #define SEC_MCR_SWR (1ULL << 32) /* SEC Channel registers */ #define SEC_CHAN_CCR(n) (((n) * 0x100) + 0x1108) #define SEC_CHAN_CCR_CDIE (1ULL << 1) #define SEC_CHAN_CCR_NT (1ULL << 2) #define SEC_CHAN_CCR_AWSE (1ULL << 3) #define SEC_CHAN_CCR_CDWE (1ULL << 4) #define SEC_CHAN_CCR_BS (1ULL << 8) #define SEC_CHAN_CCR_WGN (1ULL << 13) #define SEC_CHAN_CCR_R (1ULL << 32) #define SEC_CHAN_CCR_CON (1ULL << 33) #define SEC_CHAN_CSR(n) (((n) * 0x100) + 0x1110) #define SEC_CHAN_CSR2_FFLVL_M 0x1FULL #define SEC_CHAN_CSR2_FFLVL_S 56 #define SEC_CHAN_CSR2_GSTATE_M 0x0FULL #define SEC_CHAN_CSR2_GSTATE_S 48 #define SEC_CHAN_CSR2_PSTATE_M 0x0FULL #define SEC_CHAN_CSR2_PSTATE_S 40 #define SEC_CHAN_CSR2_MSTATE_M 0x3FULL #define SEC_CHAN_CSR2_MSTATE_S 32 #define SEC_CHAN_CSR3_FFLVL_M 0x1FULL #define SEC_CHAN_CSR3_FFLVL_S 24 #define SEC_CHAN_CSR3_MSTATE_M 0x1FFULL #define SEC_CHAN_CSR3_MSTATE_S 32 #define SEC_CHAN_CSR3_PSTATE_M 0x7FULL #define SEC_CHAN_CSR3_PSTATE_S 48 #define SEC_CHAN_CSR3_GSTATE_M 0x7FULL #define SEC_CHAN_CSR3_GSTATE_S 56 #define SEC_CHAN_CDPR(n) (((n) * 0x100) + 0x1140) #define SEC_CHAN_FF(n) (((n) * 0x100) + 0x1148) /* SEC Execution Units numbers */ #define SEC_EU_NONE 0x0 #define SEC_EU_AFEU 0x1 #define SEC_EU_DEU 0x2 #define SEC_EU_MDEU_A 0x3 #define SEC_EU_MDEU_B 0xB #define SEC_EU_RNGU 0x4 #define SEC_EU_PKEU 0x5 #define SEC_EU_AESU 0x6 #define SEC_EU_KEU 0x7 #define SEC_EU_CRCU 0x8 /* SEC descriptor types */ #define SEC_DT_COMMON_NONSNOOP 0x02 #define SEC_DT_HMAC_SNOOP 0x04 /* SEC AESU declarations and definitions */ #define SEC_AESU_MODE_ED (1ULL << 0) #define SEC_AESU_MODE_CBC (1ULL << 1) /* SEC DEU declarations and definitions */ #define SEC_DEU_MODE_ED (1ULL << 0) #define SEC_DEU_MODE_TS (1ULL << 1) #define SEC_DEU_MODE_CBC (1ULL << 2) /* SEC MDEU declarations and definitions */ #define SEC_HMAC_HASH_LEN 12 #define SEC_MDEU_MODE_SHA1 0x00 /* MDEU A */ #define SEC_MDEU_MODE_SHA384 0x00 /* MDEU B */ #define SEC_MDEU_MODE_SHA256 0x01 #define SEC_MDEU_MODE_MD5 0x02 /* 
MDEU A */ #define SEC_MDEU_MODE_SHA512 0x02 /* MDEU B */ #define SEC_MDEU_MODE_SHA224 0x03 #define SEC_MDEU_MODE_PD (1ULL << 2) #define SEC_MDEU_MODE_HMAC (1ULL << 3) #define SEC_MDEU_MODE_INIT (1ULL << 4) #define SEC_MDEU_MODE_SMAC (1ULL << 5) #define SEC_MDEU_MODE_CICV (1ULL << 6) #define SEC_MDEU_MODE_CONT (1ULL << 7) #endif Index: head/sys/dev/ubsec/ubsec.c =================================================================== --- head/sys/dev/ubsec/ubsec.c (revision 336438) +++ head/sys/dev/ubsec/ubsec.c (revision 336439) @@ -1,2861 +1,2798 @@ /* $OpenBSD: ubsec.c,v 1.115 2002/09/24 18:33:26 jason Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2000 Jason L. Wright (jason@thought.net) * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org) * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com) * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Jason L. Wright * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
*/ #include __FBSDID("$FreeBSD$"); /* * uBsec 5[56]01, 58xx hardware crypto accelerator */ #include "opt_ubsec.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include /* grr, #defines for gratuitous incompatibility in queue.h */ #define SIMPLEQ_HEAD STAILQ_HEAD #define SIMPLEQ_ENTRY STAILQ_ENTRY #define SIMPLEQ_INIT STAILQ_INIT #define SIMPLEQ_INSERT_TAIL STAILQ_INSERT_TAIL #define SIMPLEQ_EMPTY STAILQ_EMPTY #define SIMPLEQ_FIRST STAILQ_FIRST #define SIMPLEQ_REMOVE_HEAD STAILQ_REMOVE_HEAD #define SIMPLEQ_FOREACH STAILQ_FOREACH /* ditto for endian.h */ #define letoh16(x) le16toh(x) #define letoh32(x) le32toh(x) #ifdef UBSEC_RNDTEST #include #endif #include #include /* * Prototypes and count for the pci_device structure */ static int ubsec_probe(device_t); static int ubsec_attach(device_t); static int ubsec_detach(device_t); static int ubsec_suspend(device_t); static int ubsec_resume(device_t); static int ubsec_shutdown(device_t); -static int ubsec_newsession(device_t, u_int32_t *, struct cryptoini *); -static int ubsec_freesession(device_t, u_int64_t); +static int ubsec_newsession(device_t, crypto_session_t, struct cryptoini *); static int ubsec_process(device_t, struct cryptop *, int); static int ubsec_kprocess(device_t, struct cryptkop *, int); static device_method_t ubsec_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ubsec_probe), DEVMETHOD(device_attach, ubsec_attach), DEVMETHOD(device_detach, ubsec_detach), DEVMETHOD(device_suspend, ubsec_suspend), DEVMETHOD(device_resume, ubsec_resume), DEVMETHOD(device_shutdown, ubsec_shutdown), /* crypto device methods */ DEVMETHOD(cryptodev_newsession, ubsec_newsession), - DEVMETHOD(cryptodev_freesession,ubsec_freesession), DEVMETHOD(cryptodev_process, ubsec_process), DEVMETHOD(cryptodev_kprocess, ubsec_kprocess), DEVMETHOD_END }; static driver_t ubsec_driver = { "ubsec", ubsec_methods, sizeof (struct ubsec_softc) }; static devclass_t ubsec_devclass; DRIVER_MODULE(ubsec, pci, ubsec_driver, ubsec_devclass, 0, 0); MODULE_DEPEND(ubsec, crypto, 1, 1, 1); #ifdef UBSEC_RNDTEST MODULE_DEPEND(ubsec, rndtest, 1, 1, 1); #endif static void ubsec_intr(void *); static void ubsec_callback(struct ubsec_softc *, struct ubsec_q *); static void ubsec_feed(struct ubsec_softc *); static void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int); static void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *); static int ubsec_feed2(struct ubsec_softc *); static void ubsec_rng(void *); static int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t, struct ubsec_dma_alloc *, int); #define ubsec_dma_sync(_dma, _flags) \ bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags)) static void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *); static int ubsec_dmamap_aligned(struct ubsec_operand *op); static void ubsec_reset_board(struct ubsec_softc *sc); static void ubsec_init_board(struct ubsec_softc *sc); static void ubsec_init_pciregs(device_t dev); static void ubsec_totalreset(struct ubsec_softc *sc); static int ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q); static int ubsec_kprocess_modexp_hw(struct ubsec_softc *, struct cryptkop *, int); static int ubsec_kprocess_modexp_sw(struct ubsec_softc *, struct cryptkop *, int); static int ubsec_kprocess_rsapriv(struct ubsec_softc *, struct cryptkop *, int); static void 
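/*
 * Illustrative sketch (not part of this change): both sec(4) and ubsec(4)
 * drop their freesession methods in this change, since the framework owns
 * the per-session memory, and cryptodev_newsession now takes the opaque
 * crypto_session_t.  A minimal crypto method table therefore reduces to
 * the entries below; the my_* names are placeholders.
 */
#if 0
static int	my_probe(device_t);
static int	my_attach(device_t);
static int	my_newsession(device_t, crypto_session_t, struct cryptoini *);
static int	my_process(device_t, struct cryptop *, int);

static device_method_t my_methods[] = {
	DEVMETHOD(device_probe,		my_probe),
	DEVMETHOD(device_attach,	my_attach),
	/*
	 * No cryptodev_freesession entry: nothing beyond the
	 * framework-allocated session state needs releasing here.
	 */
	DEVMETHOD(cryptodev_newsession,	my_newsession),
	DEVMETHOD(cryptodev_process,	my_process),
	DEVMETHOD_END
};
#endif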
ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *); static int ubsec_ksigbits(struct crparam *); static void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int); static void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int); static SYSCTL_NODE(_hw, OID_AUTO, ubsec, CTLFLAG_RD, 0, "Broadcom driver parameters"); #ifdef UBSEC_DEBUG static void ubsec_dump_pb(volatile struct ubsec_pktbuf *); static void ubsec_dump_mcr(struct ubsec_mcr *); static void ubsec_dump_ctx2(struct ubsec_ctx_keyop *); static int ubsec_debug = 0; SYSCTL_INT(_hw_ubsec, OID_AUTO, debug, CTLFLAG_RW, &ubsec_debug, 0, "control debugging msgs"); #endif #define READ_REG(sc,r) \ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r)) #define WRITE_REG(sc,reg,val) \ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val) #define SWAP32(x) (x) = htole32(ntohl((x))) #define HTOLE32(x) (x) = htole32(x) struct ubsec_stats ubsecstats; SYSCTL_STRUCT(_hw_ubsec, OID_AUTO, stats, CTLFLAG_RD, &ubsecstats, ubsec_stats, "driver statistics"); static int ubsec_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_SUN && (pci_get_device(dev) == PCI_PRODUCT_SUN_5821 || pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K)) return (BUS_PROBE_DEFAULT); if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL && (pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5501 || pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601)) return (BUS_PROBE_DEFAULT); if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5801 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5825 )) return (BUS_PROBE_DEFAULT); return (ENXIO); } static const char* ubsec_partname(struct ubsec_softc *sc) { /* XXX sprintf numbers when not decoded */ switch (pci_get_vendor(sc->sc_dev)) { case PCI_VENDOR_BROADCOM: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_BROADCOM_5801: return "Broadcom 5801"; case PCI_PRODUCT_BROADCOM_5802: return "Broadcom 5802"; case PCI_PRODUCT_BROADCOM_5805: return "Broadcom 5805"; case PCI_PRODUCT_BROADCOM_5820: return "Broadcom 5820"; case PCI_PRODUCT_BROADCOM_5821: return "Broadcom 5821"; case PCI_PRODUCT_BROADCOM_5822: return "Broadcom 5822"; case PCI_PRODUCT_BROADCOM_5823: return "Broadcom 5823"; case PCI_PRODUCT_BROADCOM_5825: return "Broadcom 5825"; } return "Broadcom unknown-part"; case PCI_VENDOR_BLUESTEEL: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_BLUESTEEL_5601: return "Bluesteel 5601"; } return "Bluesteel unknown-part"; case PCI_VENDOR_SUN: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_SUN_5821: return "Sun Crypto 5821"; case PCI_PRODUCT_SUN_SCA1K: return "Sun Crypto 1K"; } return "Sun unknown-part"; } return "Unknown-vendor unknown-part"; } static void default_harvest(struct rndtest_state *rsp, void *buf, u_int count) { /* MarkM: FIX!! Check that this does not swamp the harvester! 
*/ random_harvest_queue(buf, count, count*NBBY/2, RANDOM_PURE_UBSEC); } static int ubsec_attach(device_t dev) { struct ubsec_softc *sc = device_get_softc(dev); struct ubsec_dma *dmap; u_int32_t i; int rid; bzero(sc, sizeof (*sc)); sc->sc_dev = dev; SIMPLEQ_INIT(&sc->sc_queue); SIMPLEQ_INIT(&sc->sc_qchip); SIMPLEQ_INIT(&sc->sc_queue2); SIMPLEQ_INIT(&sc->sc_qchip2); SIMPLEQ_INIT(&sc->sc_q2free); /* XXX handle power management */ sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR; if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL && pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601) sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG; if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805)) sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG; if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820) sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY; if ((pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5825)) || (pci_get_vendor(dev) == PCI_VENDOR_SUN && (pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K || pci_get_device(dev) == PCI_PRODUCT_SUN_5821))) { /* NB: the 5821/5822 defines some additional status bits */ sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY; sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY; } pci_enable_busmaster(dev); /* * Setup memory-mapping of PCI registers. */ rid = BS_BAR; sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_sr == NULL) { device_printf(dev, "cannot map register space\n"); goto bad; } sc->sc_st = rman_get_bustag(sc->sc_sr); sc->sc_sh = rman_get_bushandle(sc->sc_sr); /* * Arrange interrupt line. */ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE|RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not map interrupt\n"); goto bad1; } /* * NB: Network code assumes we are blocked with splimp() * so make sure the IRQ is mapped appropriately. */ if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, ubsec_intr, sc, &sc->sc_ih)) { device_printf(dev, "could not establish interrupt\n"); goto bad2; } - sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); + sc->sc_cid = crypto_get_driverid(dev, sizeof(struct ubsec_session), + CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto bad3; } /* * Setup DMA descriptor area. 
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ 0x3ffff, /* maxsize */ UBS_MAX_SCATTER, /* nsegments */ 0xffff, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sc_dmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto bad4; } SIMPLEQ_INIT(&sc->sc_freequeue); dmap = sc->sc_dmaa; for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) { struct ubsec_q *q; q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q), M_DEVBUF, M_NOWAIT); if (q == NULL) { device_printf(dev, "cannot allocate queue buffers\n"); break; } if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk), &dmap->d_alloc, 0)) { device_printf(dev, "cannot allocate dma buffers\n"); free(q, M_DEVBUF); break; } dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr; q->q_dma = dmap; sc->sc_queuea[i] = q; SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); } mtx_init(&sc->sc_mcr1lock, device_get_nameunit(dev), "mcr1 operations", MTX_DEF); mtx_init(&sc->sc_freeqlock, device_get_nameunit(dev), "mcr1 free q", MTX_DEF); device_printf(sc->sc_dev, "%s\n", ubsec_partname(sc)); crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); /* * Reset Broadcom chip */ ubsec_reset_board(sc); /* * Init Broadcom specific PCI settings */ ubsec_init_pciregs(dev); /* * Init Broadcom chip */ ubsec_init_board(sc); #ifndef UBSEC_NO_RNG if (sc->sc_flags & UBS_FLAGS_RNG) { sc->sc_statmask |= BS_STAT_MCR2_DONE; #ifdef UBSEC_RNDTEST sc->sc_rndtest = rndtest_attach(dev); if (sc->sc_rndtest) sc->sc_harvest = rndtest_harvest; else sc->sc_harvest = default_harvest; #else sc->sc_harvest = default_harvest; #endif if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &sc->sc_rng.rng_q.q_mcr, 0)) goto skip_rng; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass), &sc->sc_rng.rng_q.q_ctx, 0)) { ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); goto skip_rng; } if (ubsec_dma_malloc(sc, sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) { ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); goto skip_rng; } if (hz >= 100) sc->sc_rnghz = hz / 100; else sc->sc_rnghz = 1; callout_init(&sc->sc_rngto, 1); callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); skip_rng: ; } #endif /* UBSEC_NO_RNG */ mtx_init(&sc->sc_mcr2lock, device_get_nameunit(dev), "mcr2 operations", MTX_DEF); if (sc->sc_flags & UBS_FLAGS_KEY) { sc->sc_statmask |= BS_STAT_MCR2_DONE; crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0); #if 0 crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0); #endif } return (0); bad4: crypto_unregister_all(sc->sc_cid); bad3: bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bad2: bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bad1: bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); bad: return (ENXIO); } /* * Detach a device that successfully probed. 
*/ static int ubsec_detach(device_t dev) { struct ubsec_softc *sc = device_get_softc(dev); /* XXX wait/abort active ops */ /* disable interrupts */ WRITE_REG(sc, BS_CTRL, READ_REG(sc, BS_CTRL) &~ (BS_CTRL_MCR2INT | BS_CTRL_MCR1INT | BS_CTRL_DMAERR)); callout_stop(&sc->sc_rngto); crypto_unregister_all(sc->sc_cid); #ifdef UBSEC_RNDTEST if (sc->sc_rndtest) rndtest_detach(sc->sc_rndtest); #endif while (!SIMPLEQ_EMPTY(&sc->sc_freequeue)) { struct ubsec_q *q; q = SIMPLEQ_FIRST(&sc->sc_freequeue); SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next); ubsec_dma_free(sc, &q->q_dma->d_alloc); free(q, M_DEVBUF); } mtx_destroy(&sc->sc_mcr1lock); mtx_destroy(&sc->sc_freeqlock); #ifndef UBSEC_NO_RNG if (sc->sc_flags & UBS_FLAGS_RNG) { ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); ubsec_dma_free(sc, &sc->sc_rng.rng_buf); } #endif /* UBSEC_NO_RNG */ mtx_destroy(&sc->sc_mcr2lock); bus_generic_detach(dev); bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bus_dma_tag_destroy(sc->sc_dmat); bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); return (0); } /* * Stop all chip i/o so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int ubsec_shutdown(device_t dev) { #ifdef notyet ubsec_stop(device_get_softc(dev)); #endif return (0); } /* * Device suspend routine. */ static int ubsec_suspend(device_t dev) { struct ubsec_softc *sc = device_get_softc(dev); #ifdef notyet /* XXX stop the device and save PCI settings */ #endif sc->sc_suspended = 1; return (0); } static int ubsec_resume(device_t dev) { struct ubsec_softc *sc = device_get_softc(dev); #ifdef notyet /* XXX retore PCI settings and start the device */ #endif sc->sc_suspended = 0; return (0); } /* * UBSEC Interrupt routine */ static void ubsec_intr(void *arg) { struct ubsec_softc *sc = arg; volatile u_int32_t stat; struct ubsec_q *q; struct ubsec_dma *dmap; int npkts = 0, i; stat = READ_REG(sc, BS_STAT); stat &= sc->sc_statmask; if (stat == 0) return; WRITE_REG(sc, BS_STAT, stat); /* IACK */ /* * Check to see if we have any packets waiting for us */ if ((stat & BS_STAT_MCR1_DONE)) { mtx_lock(&sc->sc_mcr1lock); while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { q = SIMPLEQ_FIRST(&sc->sc_qchip); dmap = q->q_dma; if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0) break; SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next); npkts = q->q_nstacked_mcrs; sc->sc_nqchip -= 1+npkts; /* * search for further sc_qchip ubsec_q's that share * the same MCR, and complete them too, they must be * at the top. */ for (i = 0; i < npkts; i++) { if(q->q_stacked_mcr[i]) { ubsec_callback(sc, q->q_stacked_mcr[i]); } else { break; } } ubsec_callback(sc, q); } /* * Don't send any more packet to chip if there has been * a DMAERR. 
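 *
 * Each finished queue entry is handed to ubsec_callback(), which in turn
 * reports status to the consumer through crypto_done().  A minimal
 * sketch of that final step, with mydrv_complete() a hypothetical
 * helper:
 *
 *      static void
 *      mydrv_complete(struct cryptop *crp, int error)
 *      {
 *              crp->crp_etype = error;         // 0 on success
 *              crypto_done(crp);               // runs the consumer callback
 *      }
 *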
*/ if (!(stat & BS_STAT_DMAERR)) ubsec_feed(sc); mtx_unlock(&sc->sc_mcr1lock); } /* * Check to see if we have any key setups/rng's waiting for us */ if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) && (stat & BS_STAT_MCR2_DONE)) { struct ubsec_q2 *q2; struct ubsec_mcr *mcr; mtx_lock(&sc->sc_mcr2lock); while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) { q2 = SIMPLEQ_FIRST(&sc->sc_qchip2); ubsec_dma_sync(&q2->q_mcr, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr; if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) { ubsec_dma_sync(&q2->q_mcr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); break; } SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, q_next); ubsec_callback2(sc, q2); /* * Don't send any more packet to chip if there has been * a DMAERR. */ if (!(stat & BS_STAT_DMAERR)) ubsec_feed2(sc); } mtx_unlock(&sc->sc_mcr2lock); } /* * Check to see if we got any DMA Error */ if (stat & BS_STAT_DMAERR) { #ifdef UBSEC_DEBUG if (ubsec_debug) { volatile u_int32_t a = READ_REG(sc, BS_ERR); printf("dmaerr %s@%08x\n", (a & BS_ERR_READ) ? "read" : "write", a & BS_ERR_ADDR); } #endif /* UBSEC_DEBUG */ ubsecstats.hst_dmaerr++; mtx_lock(&sc->sc_mcr1lock); ubsec_totalreset(sc); ubsec_feed(sc); mtx_unlock(&sc->sc_mcr1lock); } if (sc->sc_needwakeup) { /* XXX check high watermark */ int wakeup; mtx_lock(&sc->sc_freeqlock); wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); #ifdef UBSEC_DEBUG if (ubsec_debug) device_printf(sc->sc_dev, "wakeup crypto (%x)\n", sc->sc_needwakeup); #endif /* UBSEC_DEBUG */ sc->sc_needwakeup &= ~wakeup; mtx_unlock(&sc->sc_freeqlock); crypto_unblock(sc->sc_cid, wakeup); } } /* * ubsec_feed() - aggregate and post requests to chip */ static void ubsec_feed(struct ubsec_softc *sc) { struct ubsec_q *q, *q2; int npkts, i; void *v; u_int32_t stat; /* * Decide how many ops to combine in a single MCR. We cannot * aggregate more than UBS_MAX_AGGR because this is the number * of slots defined in the data structure. Note that * aggregation only happens if ops are marked batch'able. * Aggregating ops reduces the number of interrupts to the host * but also (potentially) increases the latency for processing * completed ops as we only get an interrupt when all aggregated * ops have completed. */ if (sc->sc_nqueue == 0) return; if (sc->sc_nqueue > 1) { npkts = 0; SIMPLEQ_FOREACH(q, &sc->sc_queue, q_next) { npkts++; if ((q->q_crp->crp_flags & CRYPTO_F_BATCH) == 0) break; } } else npkts = 1; /* * Check device status before going any further. 
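 *
 * The wakeup bits handed to crypto_unblock() in the interrupt handler
 * above are set when ubsec_process() finds the free queue empty and
 * returns ERESTART.  A minimal sketch of that backpressure handshake,
 * using names from this file:
 *
 *      if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
 *              sc->sc_needwakeup |= CRYPTO_SYMQ;
 *              return (ERESTART);      // framework requeues the request
 *      }
 *      ...
 *      // later, from the interrupt path, once entries are free again:
 *      wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ | CRYPTO_ASYMQ);
 *      sc->sc_needwakeup &= ~wakeup;
 *      crypto_unblock(sc->sc_cid, wakeup);
 *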
*/ if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) { if (stat & BS_STAT_DMAERR) { ubsec_totalreset(sc); ubsecstats.hst_dmaerr++; } else ubsecstats.hst_mcr1full++; return; } if (sc->sc_nqueue > ubsecstats.hst_maxqueue) ubsecstats.hst_maxqueue = sc->sc_nqueue; if (npkts > UBS_MAX_AGGR) npkts = UBS_MAX_AGGR; if (npkts < 2) /* special case 1 op */ goto feed1; ubsecstats.hst_totbatch += npkts-1; #ifdef UBSEC_DEBUG if (ubsec_debug) printf("merging %d records\n", npkts); #endif /* UBSEC_DEBUG */ q = SIMPLEQ_FIRST(&sc->sc_queue); SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next); --sc->sc_nqueue; bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE); if (q->q_dst_map != NULL) bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD); q->q_nstacked_mcrs = npkts - 1; /* Number of packets stacked */ for (i = 0; i < q->q_nstacked_mcrs; i++) { q2 = SIMPLEQ_FIRST(&sc->sc_queue); bus_dmamap_sync(sc->sc_dmat, q2->q_src_map, BUS_DMASYNC_PREWRITE); if (q2->q_dst_map != NULL) bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map, BUS_DMASYNC_PREREAD); SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next); --sc->sc_nqueue; v = (void*)(((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) - sizeof(struct ubsec_mcr_add)); bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add)); q->q_stacked_mcr[i] = q2; } q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts); SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); sc->sc_nqchip += npkts; if (sc->sc_nqchip > ubsecstats.hst_maxqchip) ubsecstats.hst_maxqchip = sc->sc_nqchip; ubsec_dma_sync(&q->q_dma->d_alloc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_mcr)); return; feed1: q = SIMPLEQ_FIRST(&sc->sc_queue); bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE); if (q->q_dst_map != NULL) bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD); ubsec_dma_sync(&q->q_dma->d_alloc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_mcr)); #ifdef UBSEC_DEBUG if (ubsec_debug) printf("feed1: q->chip %p %08x stat %08x\n", q, (u_int32_t)vtophys(&q->q_dma->d_dma->d_mcr), stat); #endif /* UBSEC_DEBUG */ SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next); --sc->sc_nqueue; SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); sc->sc_nqchip++; if (sc->sc_nqchip > ubsecstats.hst_maxqchip) ubsecstats.hst_maxqchip = sc->sc_nqchip; return; } static void ubsec_setup_enckey(struct ubsec_session *ses, int algo, caddr_t key) { /* Go ahead and compute key in ubsec's byte order */ if (algo == CRYPTO_DES_CBC) { bcopy(key, &ses->ses_deskey[0], 8); bcopy(key, &ses->ses_deskey[2], 8); bcopy(key, &ses->ses_deskey[4], 8); } else bcopy(key, ses->ses_deskey, 24); SWAP32(ses->ses_deskey[0]); SWAP32(ses->ses_deskey[1]); SWAP32(ses->ses_deskey[2]); SWAP32(ses->ses_deskey[3]); SWAP32(ses->ses_deskey[4]); SWAP32(ses->ses_deskey[5]); } static void ubsec_setup_mackey(struct ubsec_session *ses, int algo, caddr_t key, int klen) { MD5_CTX md5ctx; SHA1_CTX sha1ctx; int i; for (i = 0; i < klen; i++) key[i] ^= HMAC_IPAD_VAL; if (algo == CRYPTO_MD5_HMAC) { MD5Init(&md5ctx); MD5Update(&md5ctx, key, klen); MD5Update(&md5ctx, hmac_ipad_buffer, MD5_BLOCK_LEN - klen); bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state)); } else { SHA1Init(&sha1ctx); SHA1Update(&sha1ctx, key, klen); SHA1Update(&sha1ctx, hmac_ipad_buffer, SHA1_BLOCK_LEN - klen); bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32)); } for (i = 
0; i < klen; i++) key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL); if (algo == CRYPTO_MD5_HMAC) { MD5Init(&md5ctx); MD5Update(&md5ctx, key, klen); MD5Update(&md5ctx, hmac_opad_buffer, MD5_BLOCK_LEN - klen); bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state)); } else { SHA1Init(&sha1ctx); SHA1Update(&sha1ctx, key, klen); SHA1Update(&sha1ctx, hmac_opad_buffer, SHA1_BLOCK_LEN - klen); bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32)); } for (i = 0; i < klen; i++) key[i] ^= HMAC_OPAD_VAL; } /* * Allocate a new 'session' and return an encoded session id. 'sidp' * contains our registration id, and should contain an encoded session * id on successful allocation. */ static int -ubsec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri) +ubsec_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri) { struct ubsec_softc *sc = device_get_softc(dev); struct cryptoini *c, *encini = NULL, *macini = NULL; struct ubsec_session *ses = NULL; - int sesn; - if (sidp == NULL || cri == NULL || sc == NULL) + if (cri == NULL || sc == NULL) return (EINVAL); for (c = cri; c != NULL; c = c->cri_next) { if (c->cri_alg == CRYPTO_MD5_HMAC || c->cri_alg == CRYPTO_SHA1_HMAC) { if (macini) return (EINVAL); macini = c; } else if (c->cri_alg == CRYPTO_DES_CBC || c->cri_alg == CRYPTO_3DES_CBC) { if (encini) return (EINVAL); encini = c; } else return (EINVAL); } if (encini == NULL && macini == NULL) return (EINVAL); - if (sc->sc_sessions == NULL) { - ses = sc->sc_sessions = (struct ubsec_session *)malloc( - sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT); - if (ses == NULL) - return (ENOMEM); - sesn = 0; - sc->sc_nsessions = 1; - } else { - for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { - if (sc->sc_sessions[sesn].ses_used == 0) { - ses = &sc->sc_sessions[sesn]; - break; - } - } - - if (ses == NULL) { - sesn = sc->sc_nsessions; - ses = (struct ubsec_session *)malloc((sesn + 1) * - sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT); - if (ses == NULL) - return (ENOMEM); - bcopy(sc->sc_sessions, ses, sesn * - sizeof(struct ubsec_session)); - bzero(sc->sc_sessions, sesn * - sizeof(struct ubsec_session)); - free(sc->sc_sessions, M_DEVBUF); - sc->sc_sessions = ses; - ses = &sc->sc_sessions[sesn]; - sc->sc_nsessions++; - } - } - bzero(ses, sizeof(struct ubsec_session)); - ses->ses_used = 1; - + ses = crypto_get_driver_session(cses); if (encini) { /* get an IV, network byte order */ /* XXX may read fewer than requested */ read_random(ses->ses_iv, sizeof(ses->ses_iv)); if (encini->cri_key != NULL) { ubsec_setup_enckey(ses, encini->cri_alg, encini->cri_key); } } if (macini) { ses->ses_mlen = macini->cri_mlen; if (ses->ses_mlen == 0) { if (macini->cri_alg == CRYPTO_MD5_HMAC) ses->ses_mlen = MD5_HASH_LEN; else ses->ses_mlen = SHA1_HASH_LEN; } if (macini->cri_key != NULL) { ubsec_setup_mackey(ses, macini->cri_alg, macini->cri_key, macini->cri_klen / 8); } } - *sidp = UBSEC_SID(device_get_unit(sc->sc_dev), sesn); return (0); } -/* - * Deallocate a session. 
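 *
 * With the opaque-handle API the framework owns session storage: the
 * driver fetches its per-session area with crypto_get_driver_session()
 * and fills it in, so a driver-private free routine such as the one
 * removed below is no longer needed for storage management.  A minimal
 * sketch of the new pattern, with mydrv_* names hypothetical:
 *
 *      static int
 *      mydrv_newsession(device_t dev, crypto_session_t cses,
 *          struct cryptoini *cri)
 *      {
 *              struct mydrv_session *ses;
 *
 *              ses = crypto_get_driver_session(cses);
 *              ses->mlen = cri->cri_mlen;      // cache per-session state
 *              return (0);
 *      }
 *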
- */ -static int -ubsec_freesession(device_t dev, u_int64_t tid) -{ - struct ubsec_softc *sc = device_get_softc(dev); - int session, ret; - u_int32_t sid = CRYPTO_SESID2LID(tid); - - if (sc == NULL) - return (EINVAL); - - session = UBSEC_SESSION(sid); - if (session < sc->sc_nsessions) { - bzero(&sc->sc_sessions[session], - sizeof(sc->sc_sessions[session])); - ret = 0; - } else - ret = EINVAL; - - return (ret); -} - static void ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error) { struct ubsec_operand *op = arg; KASSERT(nsegs <= UBS_MAX_SCATTER, ("Too many DMA segments returned when mapping operand")); #ifdef UBSEC_DEBUG if (ubsec_debug) printf("ubsec_op_cb: mapsize %u nsegs %d error %d\n", (u_int) mapsize, nsegs, error); #endif if (error != 0) return; op->mapsize = mapsize; op->nsegs = nsegs; bcopy(seg, op->segs, nsegs * sizeof (seg[0])); } static int ubsec_process(device_t dev, struct cryptop *crp, int hint) { struct ubsec_softc *sc = device_get_softc(dev); struct ubsec_q *q = NULL; int err = 0, i, j, nicealign; struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; int encoffset = 0, macoffset = 0, cpskip, cpoffset; int sskip, dskip, stheend, dtheend; int16_t coffset; struct ubsec_session *ses; struct ubsec_pktctx ctx; struct ubsec_dma *dmap = NULL; if (crp == NULL || crp->crp_callback == NULL || sc == NULL) { ubsecstats.hst_invalid++; return (EINVAL); } - if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) { - ubsecstats.hst_badsession++; - return (EINVAL); - } mtx_lock(&sc->sc_freeqlock); if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) { ubsecstats.hst_queuefull++; sc->sc_needwakeup |= CRYPTO_SYMQ; mtx_unlock(&sc->sc_freeqlock); return (ERESTART); } q = SIMPLEQ_FIRST(&sc->sc_freequeue); SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next); mtx_unlock(&sc->sc_freeqlock); dmap = q->q_dma; /* Save dma pointer */ bzero(q, sizeof(struct ubsec_q)); bzero(&ctx, sizeof(ctx)); - q->q_sesn = UBSEC_SESSION(crp->crp_sid); q->q_dma = dmap; - ses = &sc->sc_sessions[q->q_sesn]; + ses = crypto_get_driver_session(crp->crp_session); if (crp->crp_flags & CRYPTO_F_IMBUF) { q->q_src_m = (struct mbuf *)crp->crp_buf; q->q_dst_m = (struct mbuf *)crp->crp_buf; } else if (crp->crp_flags & CRYPTO_F_IOV) { q->q_src_io = (struct uio *)crp->crp_buf; q->q_dst_io = (struct uio *)crp->crp_buf; } else { ubsecstats.hst_badflags++; err = EINVAL; goto errout; /* XXX we don't handle contiguous blocks! 
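 *
 * The request now carries an opaque handle instead of a packed session
 * id, so the per-request lookup above collapses to a single call.  A
 * short sketch contrasting the two idioms (old names as removed by this
 * change):
 *
 *      // before: decode an integer sid and index a driver-grown array
 *      ses = &sc->sc_sessions[UBSEC_SESSION(crp->crp_sid)];
 *      // after: ask the framework for the driver's per-session state
 *      ses = crypto_get_driver_session(crp->crp_session);
 *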
*/ } bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr)); dmap->d_dma->d_mcr.mcr_pkts = htole16(1); dmap->d_dma->d_mcr.mcr_flags = 0; q->q_crp = crp; crd1 = crp->crp_desc; if (crd1 == NULL) { ubsecstats.hst_nodesc++; err = EINVAL; goto errout; } crd2 = crd1->crd_next; if (crd2 == NULL) { if (crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC) { maccrd = crd1; enccrd = NULL; } else if (crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC) { maccrd = NULL; enccrd = crd1; } else { ubsecstats.hst_badalg++; err = EINVAL; goto errout; } } else { if ((crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC) && (crd2->crd_alg == CRYPTO_DES_CBC || crd2->crd_alg == CRYPTO_3DES_CBC) && ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { maccrd = crd1; enccrd = crd2; } else if ((crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC) && (crd2->crd_alg == CRYPTO_MD5_HMAC || crd2->crd_alg == CRYPTO_SHA1_HMAC) && (crd1->crd_flags & CRD_F_ENCRYPT)) { enccrd = crd1; maccrd = crd2; } else { /* * We cannot order the ubsec as requested */ ubsecstats.hst_badalg++; err = EINVAL; goto errout; } } if (enccrd) { if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) { ubsec_setup_enckey(ses, enccrd->crd_alg, enccrd->crd_key); } encoffset = enccrd->crd_skip; ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES); if (enccrd->crd_flags & CRD_F_ENCRYPT) { q->q_flags |= UBSEC_QFLAGS_COPYOUTIV; if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, ctx.pc_iv, 8); else { ctx.pc_iv[0] = ses->ses_iv[0]; ctx.pc_iv[1] = ses->ses_iv[1]; } if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, 8, (caddr_t)ctx.pc_iv); } } else { ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND); if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(enccrd->crd_iv, ctx.pc_iv, 8); else { crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, 8, (caddr_t)ctx.pc_iv); } } ctx.pc_deskey[0] = ses->ses_deskey[0]; ctx.pc_deskey[1] = ses->ses_deskey[1]; ctx.pc_deskey[2] = ses->ses_deskey[2]; ctx.pc_deskey[3] = ses->ses_deskey[3]; ctx.pc_deskey[4] = ses->ses_deskey[4]; ctx.pc_deskey[5] = ses->ses_deskey[5]; SWAP32(ctx.pc_iv[0]); SWAP32(ctx.pc_iv[1]); } if (maccrd) { if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) { ubsec_setup_mackey(ses, maccrd->crd_alg, maccrd->crd_key, maccrd->crd_klen / 8); } macoffset = maccrd->crd_skip; if (maccrd->crd_alg == CRYPTO_MD5_HMAC) ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5); else ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1); for (i = 0; i < 5; i++) { ctx.pc_hminner[i] = ses->ses_hminner[i]; ctx.pc_hmouter[i] = ses->ses_hmouter[i]; HTOLE32(ctx.pc_hminner[i]); HTOLE32(ctx.pc_hmouter[i]); } } if (enccrd && maccrd) { /* * ubsec cannot handle packets where the end of encryption * and authentication are not the same, or where the * encrypted part begins before the authenticated part. 
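 *
 * The IV handling above follows the usual OCF pattern; a minimal sketch
 * for an 8-byte DES-class IV, where "iv" stands in for the driver's
 * local IV buffer:
 *
 *      if (crd->crd_flags & CRD_F_ENCRYPT) {
 *              // write the chosen IV into the packet for the peer
 *              if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
 *                      crypto_copyback(crp->crp_flags, crp->crp_buf,
 *                          crd->crd_inject, 8, (caddr_t)iv);
 *      } else {
 *              // decrypting: pull the IV out of the received packet
 *              if ((crd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
 *                      crypto_copydata(crp->crp_flags, crp->crp_buf,
 *                          crd->crd_inject, 8, (caddr_t)iv);
 *      }
 *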
*/ if ((encoffset + enccrd->crd_len) != (macoffset + maccrd->crd_len)) { ubsecstats.hst_lenmismatch++; err = EINVAL; goto errout; } if (enccrd->crd_skip < maccrd->crd_skip) { ubsecstats.hst_skipmismatch++; err = EINVAL; goto errout; } sskip = maccrd->crd_skip; cpskip = dskip = enccrd->crd_skip; stheend = maccrd->crd_len; dtheend = enccrd->crd_len; coffset = enccrd->crd_skip - maccrd->crd_skip; cpoffset = cpskip + dtheend; #ifdef UBSEC_DEBUG if (ubsec_debug) { printf("mac: skip %d, len %d, inject %d\n", maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject); printf("enc: skip %d, len %d, inject %d\n", enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject); printf("src: skip %d, len %d\n", sskip, stheend); printf("dst: skip %d, len %d\n", dskip, dtheend); printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n", coffset, stheend, cpskip, cpoffset); } #endif } else { cpskip = dskip = sskip = macoffset + encoffset; dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len; cpoffset = cpskip + dtheend; coffset = 0; } ctx.pc_offset = htole16(coffset >> 2); if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_src_map)) { ubsecstats.hst_nomap++; err = ENOMEM; goto errout; } if (crp->crp_flags & CRYPTO_F_IMBUF) { if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map, q->q_src_m, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); q->q_src_map = NULL; ubsecstats.hst_noload++; err = ENOMEM; goto errout; } } else if (crp->crp_flags & CRYPTO_F_IOV) { if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map, q->q_src_io, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); q->q_src_map = NULL; ubsecstats.hst_noload++; err = ENOMEM; goto errout; } } nicealign = ubsec_dmamap_aligned(&q->q_src); dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend); #ifdef UBSEC_DEBUG if (ubsec_debug) printf("src skip: %d nicealign: %u\n", sskip, nicealign); #endif for (i = j = 0; i < q->q_src_nsegs; i++) { struct ubsec_pktbuf *pb; bus_size_t packl = q->q_src_segs[i].ds_len; bus_addr_t packp = q->q_src_segs[i].ds_addr; if (sskip >= packl) { sskip -= packl; continue; } packl -= sskip; packp += sskip; sskip = 0; if (packl > 0xfffc) { err = EIO; goto errout; } if (j == 0) pb = &dmap->d_dma->d_mcr.mcr_ipktbuf; else pb = &dmap->d_dma->d_sbuf[j - 1]; pb->pb_addr = htole32(packp); if (stheend) { if (packl > stheend) { pb->pb_len = htole32(stheend); stheend = 0; } else { pb->pb_len = htole32(packl); stheend -= packl; } } else pb->pb_len = htole32(packl); if ((i + 1) == q->q_src_nsegs) pb->pb_next = 0; else pb->pb_next = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_sbuf[j])); j++; } if (enccrd == NULL && maccrd != NULL) { dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0; dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0; dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_macbuf[0])); #ifdef UBSEC_DEBUG if (ubsec_debug) printf("opkt: %x %x %x\n", dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr, dmap->d_dma->d_mcr.mcr_opktbuf.pb_len, dmap->d_dma->d_mcr.mcr_opktbuf.pb_next); #endif } else { if (crp->crp_flags & CRYPTO_F_IOV) { if (!nicealign) { ubsecstats.hst_iovmisaligned++; err = EINVAL; goto errout; } if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_dst_map)) { ubsecstats.hst_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map, q->q_dst_io, ubsec_op_cb, &q->q_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); q->q_dst_map = 
NULL; ubsecstats.hst_noload++; err = ENOMEM; goto errout; } } else if (crp->crp_flags & CRYPTO_F_IMBUF) { if (nicealign) { q->q_dst = q->q_src; } else { int totlen, len; struct mbuf *m, *top, **mp; ubsecstats.hst_unaligned++; totlen = q->q_src_mapsize; if (totlen >= MINCLSIZE) { m = m_getcl(M_NOWAIT, MT_DATA, q->q_src_m->m_flags & M_PKTHDR); len = MCLBYTES; } else if (q->q_src_m->m_flags & M_PKTHDR) { m = m_gethdr(M_NOWAIT, MT_DATA); len = MHLEN; } else { m = m_get(M_NOWAIT, MT_DATA); len = MLEN; } if (m && q->q_src_m->m_flags & M_PKTHDR && !m_dup_pkthdr(m, q->q_src_m, M_NOWAIT)) { m_free(m); m = NULL; } if (m == NULL) { ubsecstats.hst_nombuf++; err = sc->sc_nqueue ? ERESTART : ENOMEM; goto errout; } m->m_len = len = min(totlen, len); totlen -= len; top = m; mp = ⊤ while (totlen > 0) { if (totlen >= MINCLSIZE) { m = m_getcl(M_NOWAIT, MT_DATA, 0); len = MCLBYTES; } else { m = m_get(M_NOWAIT, MT_DATA); len = MLEN; } if (m == NULL) { m_freem(top); ubsecstats.hst_nombuf++; err = sc->sc_nqueue ? ERESTART : ENOMEM; goto errout; } m->m_len = len = min(totlen, len); totlen -= len; *mp = m; mp = &m->m_next; } q->q_dst_m = top; ubsec_mcopy(q->q_src_m, q->q_dst_m, cpskip, cpoffset); if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_dst_map) != 0) { ubsecstats.hst_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_dst_map, q->q_dst_m, ubsec_op_cb, &q->q_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); q->q_dst_map = NULL; ubsecstats.hst_noload++; err = ENOMEM; goto errout; } } } else { ubsecstats.hst_badflags++; err = EINVAL; goto errout; } #ifdef UBSEC_DEBUG if (ubsec_debug) printf("dst skip: %d\n", dskip); #endif for (i = j = 0; i < q->q_dst_nsegs; i++) { struct ubsec_pktbuf *pb; bus_size_t packl = q->q_dst_segs[i].ds_len; bus_addr_t packp = q->q_dst_segs[i].ds_addr; if (dskip >= packl) { dskip -= packl; continue; } packl -= dskip; packp += dskip; dskip = 0; if (packl > 0xfffc) { err = EIO; goto errout; } if (j == 0) pb = &dmap->d_dma->d_mcr.mcr_opktbuf; else pb = &dmap->d_dma->d_dbuf[j - 1]; pb->pb_addr = htole32(packp); if (dtheend) { if (packl > dtheend) { pb->pb_len = htole32(dtheend); dtheend = 0; } else { pb->pb_len = htole32(packl); dtheend -= packl; } } else pb->pb_len = htole32(packl); if ((i + 1) == q->q_dst_nsegs) { if (maccrd) pb->pb_next = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_macbuf[0])); else pb->pb_next = 0; } else pb->pb_next = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_dbuf[j])); j++; } } dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_ctx)); if (sc->sc_flags & UBS_FLAGS_LONGCTX) { struct ubsec_pktctx_long *ctxl; ctxl = (struct ubsec_pktctx_long *)(dmap->d_alloc.dma_vaddr + offsetof(struct ubsec_dmachunk, d_ctx)); /* transform small context into long context */ ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long)); ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC); ctxl->pc_flags = ctx.pc_flags; ctxl->pc_offset = ctx.pc_offset; for (i = 0; i < 6; i++) ctxl->pc_deskey[i] = ctx.pc_deskey[i]; for (i = 0; i < 5; i++) ctxl->pc_hminner[i] = ctx.pc_hminner[i]; for (i = 0; i < 5; i++) ctxl->pc_hmouter[i] = ctx.pc_hmouter[i]; ctxl->pc_iv[0] = ctx.pc_iv[0]; ctxl->pc_iv[1] = ctx.pc_iv[1]; } else bcopy(&ctx, dmap->d_alloc.dma_vaddr + offsetof(struct ubsec_dmachunk, d_ctx), sizeof(struct ubsec_pktctx)); mtx_lock(&sc->sc_mcr1lock); SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next); sc->sc_nqueue++; ubsecstats.hst_ipackets++; 
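	/*
	 * When the source chain is not word aligned (the hst_unaligned case
	 * above), the driver builds a fresh destination mbuf chain and lets
	 * ubsec_mcopy() carry over the bytes outside the crypto range.  A
	 * minimal sketch of growing such a chain, with totlen the payload
	 * size and allocation failures ignored for brevity:
	 *
	 *      top = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	 *      top->m_len = min(totlen, MCLBYTES);
	 *      remain = totlen - top->m_len;
	 *      for (mp = &top->m_next; remain > 0; mp = &(*mp)->m_next) {
	 *              *mp = m_getcl(M_NOWAIT, MT_DATA, 0);
	 *              (*mp)->m_len = min(remain, MCLBYTES);
	 *              remain -= (*mp)->m_len;
	 *      }
	 */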
ubsecstats.hst_ibytes += dmap->d_alloc.dma_size; if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= UBS_MAX_AGGR) ubsec_feed(sc); mtx_unlock(&sc->sc_mcr1lock); return (0); errout: if (q != NULL) { if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) m_freem(q->q_dst_m); if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); } if (q->q_src_map != NULL) { bus_dmamap_unload(sc->sc_dmat, q->q_src_map); bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); } } if (q != NULL || err == ERESTART) { mtx_lock(&sc->sc_freeqlock); if (q != NULL) SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); if (err == ERESTART) sc->sc_needwakeup |= CRYPTO_SYMQ; mtx_unlock(&sc->sc_freeqlock); } if (err != ERESTART) { crp->crp_etype = err; crypto_done(crp); } return (err); } static void ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q) { struct cryptop *crp = (struct cryptop *)q->q_crp; + struct ubsec_session *ses; struct cryptodesc *crd; struct ubsec_dma *dmap = q->q_dma; + ses = crypto_get_driver_session(crp->crp_session); + ubsecstats.hst_opackets++; ubsecstats.hst_obytes += dmap->d_alloc.dma_size; ubsec_dma_sync(&dmap->d_alloc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); } bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_dmat, q->q_src_map); bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) { m_freem(q->q_src_m); crp->crp_buf = (caddr_t)q->q_dst_m; } /* copy out IV for future use */ if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) { for (crd = crp->crp_desc; crd; crd = crd->crd_next) { if (crd->crd_alg != CRYPTO_DES_CBC && crd->crd_alg != CRYPTO_3DES_CBC) continue; crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip + crd->crd_len - 8, 8, - (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv); + (caddr_t)ses->ses_iv); break; } } for (crd = crp->crp_desc; crd; crd = crd->crd_next) { if (crd->crd_alg != CRYPTO_MD5_HMAC && crd->crd_alg != CRYPTO_SHA1_HMAC) continue; crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, - sc->sc_sessions[q->q_sesn].ses_mlen, - (caddr_t)dmap->d_dma->d_macbuf); + ses->ses_mlen, (caddr_t)dmap->d_dma->d_macbuf); break; } mtx_lock(&sc->sc_freeqlock); SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); mtx_unlock(&sc->sc_freeqlock); crypto_done(crp); } static void ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset) { int i, j, dlen, slen; caddr_t dptr, sptr; j = 0; sptr = srcm->m_data; slen = srcm->m_len; dptr = dstm->m_data; dlen = dstm->m_len; while (1) { for (i = 0; i < min(slen, dlen); i++) { if (j < hoffset || j >= toffset) *dptr++ = *sptr++; slen--; dlen--; j++; } if (slen == 0) { srcm = srcm->m_next; if (srcm == NULL) return; sptr = srcm->m_data; slen = srcm->m_len; } if (dlen == 0) { dstm = dstm->m_next; if (dstm == NULL) return; dptr = dstm->m_data; dlen = dstm->m_len; } } } /* * feed the key generator, must be called at splimp() or higher. 
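 *
 * On completion, ubsec_callback() above pushes results back into the
 * consumer's buffer: the computed HMAC goes to crd_inject, and the last
 * ciphertext block is saved as the next CBC IV.  A compressed sketch
 * using the names from this file:
 *
 *      crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
 *          ses->ses_mlen, (caddr_t)dmap->d_dma->d_macbuf);
 *      crypto_copydata(crp->crp_flags, crp->crp_buf,
 *          crd->crd_skip + crd->crd_len - 8, 8, (caddr_t)ses->ses_iv);
 *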
*/ static int ubsec_feed2(struct ubsec_softc *sc) { struct ubsec_q2 *q; while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) { if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL) break; q = SIMPLEQ_FIRST(&sc->sc_queue2); ubsec_dma_sync(&q->q_mcr, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_PREWRITE); WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr); SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, q_next); --sc->sc_nqueue2; SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next); } return (0); } /* * Callback for handling random numbers */ static void ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q) { struct cryptkop *krp; struct ubsec_ctx_keyop *ctx; ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr; ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_POSTWRITE); switch (q->q_type) { #ifndef UBSEC_NO_RNG case UBS_CTXOP_RNGBYPASS: { struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q; ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_POSTREAD); (*sc->sc_harvest)(sc->sc_rndtest, rng->rng_buf.dma_vaddr, UBSEC_RNG_BUFSIZ*sizeof (u_int32_t)); rng->rng_used = 0; callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); break; } #endif case UBS_CTXOP_MODEXP: { struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; u_int rlen, clen; krp = me->me_krp; rlen = (me->me_modbits + 7) / 8; clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8; ubsec_dma_sync(&me->me_M, BUS_DMASYNC_POSTWRITE); ubsec_dma_sync(&me->me_E, BUS_DMASYNC_POSTWRITE); ubsec_dma_sync(&me->me_C, BUS_DMASYNC_POSTREAD); ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_POSTWRITE); if (clen < rlen) krp->krp_status = E2BIG; else { if (sc->sc_flags & UBS_FLAGS_HWNORM) { bzero(krp->krp_param[krp->krp_iparams].crp_p, (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8); bcopy(me->me_C.dma_vaddr, krp->krp_param[krp->krp_iparams].crp_p, (me->me_modbits + 7) / 8); } else ubsec_kshift_l(me->me_shiftbits, me->me_C.dma_vaddr, me->me_normbits, krp->krp_param[krp->krp_iparams].crp_p, krp->krp_param[krp->krp_iparams].crp_nbits); } crypto_kdone(krp); /* bzero all potentially sensitive data */ bzero(me->me_E.dma_vaddr, me->me_E.dma_size); bzero(me->me_M.dma_vaddr, me->me_M.dma_size); bzero(me->me_C.dma_vaddr, me->me_C.dma_size); bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); /* Can't free here, so put us on the free list. */ SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next); break; } case UBS_CTXOP_RSAPRIV: { struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; u_int len; krp = rp->rpr_krp; ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_POSTWRITE); ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_POSTREAD); len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8; bcopy(rp->rpr_msgout.dma_vaddr, krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len); crypto_kdone(krp); bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); bzero(rp->rpr_q.q_ctx.dma_vaddr, rp->rpr_q.q_ctx.dma_size); /* Can't free here, so put us on the free list. 
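 *
 * Key operations report back through crypto_kdone() much as symmetric
 * ones do through crypto_done().  A minimal sketch of finishing a modexp
 * request, where "result" and "rlen" stand in for the device's output
 * buffer and its length, and "clen" is the caller's output capacity:
 *
 *      if (clen < rlen)
 *              krp->krp_status = E2BIG;        // result would not fit
 *      else
 *              bcopy(result, krp->krp_param[krp->krp_iparams].crp_p, rlen);
 *      crypto_kdone(krp);
 *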
*/ SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next); break; } default: device_printf(sc->sc_dev, "unknown ctx op: %x\n", letoh16(ctx->ctx_op)); break; } } #ifndef UBSEC_NO_RNG static void ubsec_rng(void *vsc) { struct ubsec_softc *sc = vsc; struct ubsec_q2_rng *rng = &sc->sc_rng; struct ubsec_mcr *mcr; struct ubsec_ctx_rngbypass *ctx; mtx_lock(&sc->sc_mcr2lock); if (rng->rng_used) { mtx_unlock(&sc->sc_mcr2lock); return; } sc->sc_nqueue2++; if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE) goto out; mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr; ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr; mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr); mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0; mcr->mcr_ipktbuf.pb_len = 0; mcr->mcr_reserved = mcr->mcr_pktlen = 0; mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr); mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) & UBS_PKTBUF_LEN); mcr->mcr_opktbuf.pb_next = 0; ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass)); ctx->rbp_op = htole16(UBS_CTXOP_RNGBYPASS); rng->rng_q.q_type = UBS_CTXOP_RNGBYPASS; ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_PREREAD); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next); rng->rng_used = 1; ubsec_feed2(sc); ubsecstats.hst_rng++; mtx_unlock(&sc->sc_mcr2lock); return; out: /* * Something weird happened, generate our own call back. */ sc->sc_nqueue2--; mtx_unlock(&sc->sc_mcr2lock); callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); } #endif /* UBSEC_NO_RNG */ static void ubsec_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } static int ubsec_dma_malloc( struct ubsec_softc *sc, bus_size_t size, struct ubsec_dma_alloc *dma, int mapflags ) { int r; /* XXX could specify sc_dmat as parent but that just adds overhead */ r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma->dma_tag); if (r != 0) { device_printf(sc->sc_dev, "ubsec_dma_malloc: " "bus_dma_tag_create failed; error %u\n", r); goto fail_1; } r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT, &dma->dma_map); if (r != 0) { device_printf(sc->sc_dev, "ubsec_dma_malloc: " "bus_dmammem_alloc failed; size %ju, error %u\n", (intmax_t)size, r); goto fail_2; } r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, ubsec_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (r != 0) { device_printf(sc->sc_dev, "ubsec_dma_malloc: " "bus_dmamap_load failed; error %u\n", r); goto fail_3; } dma->dma_size = size; return (0); fail_3: bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); fail_1: bus_dma_tag_destroy(dma->dma_tag); dma->dma_tag = NULL; return (r); } static void ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma) { bus_dmamap_unload(dma->dma_tag, dma->dma_map); bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); } /* * Resets the board. Values in the regesters are left as is * from the reset (i.e. initial values are assigned elsewhere). 
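 *
 * The DMA helpers above follow the standard three-step busdma idiom.  A
 * compressed sketch, with tag/map/vaddr/paddr local placeholders and all
 * error unwinding elided:
 *
 *      bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *          BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *          size, 1, size, BUS_DMA_ALLOCNOW, NULL, NULL, &tag);
 *      bus_dmamem_alloc(tag, &vaddr, BUS_DMA_NOWAIT, &map);
 *      bus_dmamap_load(tag, map, vaddr, size, ubsec_dmamap_cb, &paddr,
 *          BUS_DMA_NOWAIT);
 *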
*/ static void ubsec_reset_board(struct ubsec_softc *sc) { volatile u_int32_t ctrl; ctrl = READ_REG(sc, BS_CTRL); ctrl |= BS_CTRL_RESET; WRITE_REG(sc, BS_CTRL, ctrl); /* * Wait aprox. 30 PCI clocks = 900 ns = 0.9 us */ DELAY(10); } /* * Init Broadcom registers */ static void ubsec_init_board(struct ubsec_softc *sc) { u_int32_t ctrl; ctrl = READ_REG(sc, BS_CTRL); ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64); ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT; if (sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) ctrl |= BS_CTRL_MCR2INT; else ctrl &= ~BS_CTRL_MCR2INT; if (sc->sc_flags & UBS_FLAGS_HWNORM) ctrl &= ~BS_CTRL_SWNORM; WRITE_REG(sc, BS_CTRL, ctrl); } /* * Init Broadcom PCI registers */ static void ubsec_init_pciregs(device_t dev) { #if 0 u_int32_t misc; misc = pci_conf_read(pc, pa->pa_tag, BS_RTY_TOUT); misc = (misc & ~(UBS_PCI_RTY_MASK << UBS_PCI_RTY_SHIFT)) | ((UBS_DEF_RTY & 0xff) << UBS_PCI_RTY_SHIFT); misc = (misc & ~(UBS_PCI_TOUT_MASK << UBS_PCI_TOUT_SHIFT)) | ((UBS_DEF_TOUT & 0xff) << UBS_PCI_TOUT_SHIFT); pci_conf_write(pc, pa->pa_tag, BS_RTY_TOUT, misc); #endif /* * This will set the cache line size to 1, this will * force the BCM58xx chip just to do burst read/writes. * Cache line read/writes are to slow */ pci_write_config(dev, PCIR_CACHELNSZ, UBS_DEF_CACHELINE, 1); } /* * Clean up after a chip crash. * It is assumed that the caller in splimp() */ static void ubsec_cleanchip(struct ubsec_softc *sc) { struct ubsec_q *q; while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { q = SIMPLEQ_FIRST(&sc->sc_qchip); SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next); ubsec_free_q(sc, q); } sc->sc_nqchip = 0; } /* * free a ubsec_q * It is assumed that the caller is within splimp(). */ static int ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q) { struct ubsec_q *q2; struct cryptop *crp; int npkts; int i; npkts = q->q_nstacked_mcrs; for (i = 0; i < npkts; i++) { if(q->q_stacked_mcr[i]) { q2 = q->q_stacked_mcr[i]; if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m)) m_freem(q2->q_dst_m); crp = (struct cryptop *)q2->q_crp; SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next); crp->crp_etype = EFAULT; crypto_done(crp); } else { break; } } /* * Free header MCR */ if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) m_freem(q->q_dst_m); crp = (struct cryptop *)q->q_crp; SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); crp->crp_etype = EFAULT; crypto_done(crp); return(0); } /* * Routine to reset the chip and clean up. 
* It is assumed that the caller is in splimp() */ static void ubsec_totalreset(struct ubsec_softc *sc) { ubsec_reset_board(sc); ubsec_init_board(sc); ubsec_cleanchip(sc); } static int ubsec_dmamap_aligned(struct ubsec_operand *op) { int i; for (i = 0; i < op->nsegs; i++) { if (op->segs[i].ds_addr & 3) return (0); if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3)) return (0); } return (1); } static void ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q) { switch (q->q_type) { case UBS_CTXOP_MODEXP: { struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; ubsec_dma_free(sc, &me->me_q.q_mcr); ubsec_dma_free(sc, &me->me_q.q_ctx); ubsec_dma_free(sc, &me->me_M); ubsec_dma_free(sc, &me->me_E); ubsec_dma_free(sc, &me->me_C); ubsec_dma_free(sc, &me->me_epb); free(me, M_DEVBUF); break; } case UBS_CTXOP_RSAPRIV: { struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; ubsec_dma_free(sc, &rp->rpr_q.q_mcr); ubsec_dma_free(sc, &rp->rpr_q.q_ctx); ubsec_dma_free(sc, &rp->rpr_msgin); ubsec_dma_free(sc, &rp->rpr_msgout); free(rp, M_DEVBUF); break; } default: device_printf(sc->sc_dev, "invalid kfree 0x%x\n", q->q_type); break; } } static int ubsec_kprocess(device_t dev, struct cryptkop *krp, int hint) { struct ubsec_softc *sc = device_get_softc(dev); int r; if (krp == NULL || krp->krp_callback == NULL) return (EINVAL); while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) { struct ubsec_q2 *q; q = SIMPLEQ_FIRST(&sc->sc_q2free); SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, q_next); ubsec_kfree(sc, q); } switch (krp->krp_op) { case CRK_MOD_EXP: if (sc->sc_flags & UBS_FLAGS_HWNORM) r = ubsec_kprocess_modexp_hw(sc, krp, hint); else r = ubsec_kprocess_modexp_sw(sc, krp, hint); break; case CRK_MOD_EXP_CRT: return (ubsec_kprocess_rsapriv(sc, krp, hint)); default: device_printf(sc->sc_dev, "kprocess: invalid op 0x%x\n", krp->krp_op); krp->krp_status = EOPNOTSUPP; crypto_kdone(krp); return (0); } return (0); /* silence compiler */ } /* * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization) */ static int ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp, int hint) { struct ubsec_q2_modexp *me; struct ubsec_mcr *mcr; struct ubsec_ctx_modexp *ctx; struct ubsec_pktbuf *epb; int err = 0; u_int nbits, normbits, mbits, shiftbits, ebits; me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); if (me == NULL) { err = ENOMEM; goto errout; } bzero(me, sizeof *me); me->me_krp = krp; me->me_q.q_type = UBS_CTXOP_MODEXP; nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); if (nbits <= 512) normbits = 512; else if (nbits <= 768) normbits = 768; else if (nbits <= 1024) normbits = 1024; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) normbits = 1536; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) normbits = 2048; else { err = E2BIG; goto errout; } shiftbits = normbits - nbits; me->me_modbits = nbits; me->me_shiftbits = shiftbits; me->me_normbits = normbits; /* Sanity check: result bits must be >= true modulus bits. 
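 *
 * ubsec_ksigbits(), used above to size the operands, returns the number
 * of significant bits in a crparam big number by skipping high-order
 * zero bytes and then trimming to the top set bit.  A minimal sketch of
 * the idea, mirroring the helper defined later in this file (sig, plen,
 * p, c and i are local placeholders):
 *
 *      sig = plen * 8;                         // plen: length in bytes
 *      for (i = plen - 1; i >= 0 && p[i] == 0; i--)
 *              sig -= 8;                       // drop all-zero high bytes
 *      if (i >= 0)
 *              for (c = p[i]; (c & 0x80) == 0; c <<= 1)
 *                      sig--;                  // drop leading zero bits
 *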
*/ if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { err = ERANGE; goto errout; } if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &me->me_q.q_mcr, 0)) { err = ENOMEM; goto errout; } mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), &me->me_q.q_ctx, 0)) { err = ENOMEM; goto errout; } mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); if (mbits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { err = ENOMEM; goto errout; } ubsec_kshift_r(shiftbits, krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits, me->me_M.dma_vaddr, normbits); if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { err = ENOMEM; goto errout; } bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); if (ebits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { err = ENOMEM; goto errout; } ubsec_kshift_r(shiftbits, krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits, me->me_E.dma_vaddr, normbits); if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), &me->me_epb, 0)) { err = ENOMEM; goto errout; } epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; epb->pb_addr = htole32(me->me_E.dma_paddr); epb->pb_next = 0; epb->pb_len = htole32(normbits / 8); #ifdef UBSEC_DEBUG if (ubsec_debug) { printf("Epb "); ubsec_dump_pb(epb); } #endif mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); mcr->mcr_reserved = 0; mcr->mcr_pktlen = 0; mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); mcr->mcr_opktbuf.pb_next = 0; mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); #ifdef DIAGNOSTIC /* Misaligned output buffer will hang the chip. */ if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) panic("%s: modexp invalid addr 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_addr)); if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) panic("%s: modexp invalid len 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_len)); #endif ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; bzero(ctx, sizeof(*ctx)); ubsec_kshift_r(shiftbits, krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits, ctx->me_N, normbits); ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); ctx->me_op = htole16(UBS_CTXOP_MODEXP); ctx->me_E_len = htole16(nbits); ctx->me_N_len = htole16(nbits); #ifdef UBSEC_DEBUG if (ubsec_debug) { ubsec_dump_mcr(mcr); ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); } #endif /* * ubsec_feed2 will sync mcr and ctx, we just need to sync * everything else. */ ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD); ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE); /* Enqueue and we're done... 
*/ mtx_lock(&sc->sc_mcr2lock); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); ubsec_feed2(sc); ubsecstats.hst_modexp++; mtx_unlock(&sc->sc_mcr2lock); return (0); errout: if (me != NULL) { if (me->me_q.q_mcr.dma_tag != NULL) ubsec_dma_free(sc, &me->me_q.q_mcr); if (me->me_q.q_ctx.dma_tag != NULL) { bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); ubsec_dma_free(sc, &me->me_q.q_ctx); } if (me->me_M.dma_tag != NULL) { bzero(me->me_M.dma_vaddr, me->me_M.dma_size); ubsec_dma_free(sc, &me->me_M); } if (me->me_E.dma_tag != NULL) { bzero(me->me_E.dma_vaddr, me->me_E.dma_size); ubsec_dma_free(sc, &me->me_E); } if (me->me_C.dma_tag != NULL) { bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ubsec_dma_free(sc, &me->me_C); } if (me->me_epb.dma_tag != NULL) ubsec_dma_free(sc, &me->me_epb); free(me, M_DEVBUF); } krp->krp_status = err; crypto_kdone(krp); return (0); } /* * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization) */ static int ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, int hint) { struct ubsec_q2_modexp *me; struct ubsec_mcr *mcr; struct ubsec_ctx_modexp *ctx; struct ubsec_pktbuf *epb; int err = 0; u_int nbits, normbits, mbits, shiftbits, ebits; me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); if (me == NULL) { err = ENOMEM; goto errout; } bzero(me, sizeof *me); me->me_krp = krp; me->me_q.q_type = UBS_CTXOP_MODEXP; nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); if (nbits <= 512) normbits = 512; else if (nbits <= 768) normbits = 768; else if (nbits <= 1024) normbits = 1024; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) normbits = 1536; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) normbits = 2048; else { err = E2BIG; goto errout; } shiftbits = normbits - nbits; /* XXX ??? */ me->me_modbits = nbits; me->me_shiftbits = shiftbits; me->me_normbits = normbits; /* Sanity check: result bits must be >= true modulus bits. 
*/ if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { err = ERANGE; goto errout; } if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &me->me_q.q_mcr, 0)) { err = ENOMEM; goto errout; } mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), &me->me_q.q_ctx, 0)) { err = ENOMEM; goto errout; } mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); if (mbits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { err = ENOMEM; goto errout; } bzero(me->me_M.dma_vaddr, normbits / 8); bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p, me->me_M.dma_vaddr, (mbits + 7) / 8); if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { err = ENOMEM; goto errout; } bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); if (ebits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { err = ENOMEM; goto errout; } bzero(me->me_E.dma_vaddr, normbits / 8); bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p, me->me_E.dma_vaddr, (ebits + 7) / 8); if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), &me->me_epb, 0)) { err = ENOMEM; goto errout; } epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; epb->pb_addr = htole32(me->me_E.dma_paddr); epb->pb_next = 0; epb->pb_len = htole32((ebits + 7) / 8); #ifdef UBSEC_DEBUG if (ubsec_debug) { printf("Epb "); ubsec_dump_pb(epb); } #endif mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); mcr->mcr_reserved = 0; mcr->mcr_pktlen = 0; mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); mcr->mcr_opktbuf.pb_next = 0; mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); #ifdef DIAGNOSTIC /* Misaligned output buffer will hang the chip. */ if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) panic("%s: modexp invalid addr 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_addr)); if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) panic("%s: modexp invalid len 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_len)); #endif ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; bzero(ctx, sizeof(*ctx)); bcopy(krp->krp_param[UBS_MODEXP_PAR_N].crp_p, ctx->me_N, (nbits + 7) / 8); ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); ctx->me_op = htole16(UBS_CTXOP_MODEXP); ctx->me_E_len = htole16(ebits); ctx->me_N_len = htole16(nbits); #ifdef UBSEC_DEBUG if (ubsec_debug) { ubsec_dump_mcr(mcr); ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); } #endif /* * ubsec_feed2 will sync mcr and ctx, we just need to sync * everything else. */ ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD); ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE); /* Enqueue and we're done... 
*/ mtx_lock(&sc->sc_mcr2lock); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); ubsec_feed2(sc); mtx_unlock(&sc->sc_mcr2lock); return (0); errout: if (me != NULL) { if (me->me_q.q_mcr.dma_tag != NULL) ubsec_dma_free(sc, &me->me_q.q_mcr); if (me->me_q.q_ctx.dma_tag != NULL) { bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); ubsec_dma_free(sc, &me->me_q.q_ctx); } if (me->me_M.dma_tag != NULL) { bzero(me->me_M.dma_vaddr, me->me_M.dma_size); ubsec_dma_free(sc, &me->me_M); } if (me->me_E.dma_tag != NULL) { bzero(me->me_E.dma_vaddr, me->me_E.dma_size); ubsec_dma_free(sc, &me->me_E); } if (me->me_C.dma_tag != NULL) { bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ubsec_dma_free(sc, &me->me_C); } if (me->me_epb.dma_tag != NULL) ubsec_dma_free(sc, &me->me_epb); free(me, M_DEVBUF); } krp->krp_status = err; crypto_kdone(krp); return (0); } static int ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, int hint) { struct ubsec_q2_rsapriv *rp = NULL; struct ubsec_mcr *mcr; struct ubsec_ctx_rsapriv *ctx; int err = 0; u_int padlen, msglen; msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]); padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]); if (msglen > padlen) padlen = msglen; if (padlen <= 256) padlen = 256; else if (padlen <= 384) padlen = 384; else if (padlen <= 512) padlen = 512; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768) padlen = 768; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024) padlen = 1024; else { err = E2BIG; goto errout; } if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) { err = E2BIG; goto errout; } if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) { err = E2BIG; goto errout; } if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) { err = E2BIG; goto errout; } rp = (struct ubsec_q2_rsapriv *)malloc(sizeof *rp, M_DEVBUF, M_NOWAIT); if (rp == NULL) return (ENOMEM); bzero(rp, sizeof *rp); rp->rpr_krp = krp; rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &rp->rpr_q.q_mcr, 0)) { err = ENOMEM; goto errout; } mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv), &rp->rpr_q.q_ctx, 0)) { err = ENOMEM; goto errout; } ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr; bzero(ctx, sizeof *ctx); /* Copy in p */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p, &ctx->rpr_buf[0 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8); /* Copy in q */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p, &ctx->rpr_buf[1 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8); /* Copy in dp */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p, &ctx->rpr_buf[2 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8); /* Copy in dq */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p, &ctx->rpr_buf[3 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8); /* Copy in pinv */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p, &ctx->rpr_buf[4 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8); msglen = padlen * 2; /* Copy in input message (aligned buffer/length). */ if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) { /* Is this likely? 
*/ err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) { err = ENOMEM; goto errout; } bzero(rp->rpr_msgin.dma_vaddr, (msglen + 7) / 8); bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p, rp->rpr_msgin.dma_vaddr, (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8); /* Prepare space for output message (aligned buffer/length). */ if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) { /* Is this likely? */ err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) { err = ENOMEM; goto errout; } bzero(rp->rpr_msgout.dma_vaddr, (msglen + 7) / 8); mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr); mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr); mcr->mcr_ipktbuf.pb_next = 0; mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size); mcr->mcr_reserved = 0; mcr->mcr_pktlen = htole16(msglen); mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr); mcr->mcr_opktbuf.pb_next = 0; mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size); #ifdef DIAGNOSTIC if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) { panic("%s: rsapriv: invalid msgin %x(0x%jx)", device_get_nameunit(sc->sc_dev), rp->rpr_msgin.dma_paddr, (uintmax_t)rp->rpr_msgin.dma_size); } if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) { panic("%s: rsapriv: invalid msgout %x(0x%jx)", device_get_nameunit(sc->sc_dev), rp->rpr_msgout.dma_paddr, (uintmax_t)rp->rpr_msgout.dma_size); } #endif ctx->rpr_len = (sizeof(u_int16_t) * 4) + (5 * (padlen / 8)); ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV); ctx->rpr_q_len = htole16(padlen); ctx->rpr_p_len = htole16(padlen); /* * ubsec_feed2 will sync mcr and ctx, we just need to sync * everything else. */ ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_PREREAD); /* Enqueue and we're done... */ mtx_lock(&sc->sc_mcr2lock); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next); ubsec_feed2(sc); ubsecstats.hst_modexpcrt++; mtx_unlock(&sc->sc_mcr2lock); return (0); errout: if (rp != NULL) { if (rp->rpr_q.q_mcr.dma_tag != NULL) ubsec_dma_free(sc, &rp->rpr_q.q_mcr); if (rp->rpr_msgin.dma_tag != NULL) { bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); ubsec_dma_free(sc, &rp->rpr_msgin); } if (rp->rpr_msgout.dma_tag != NULL) { bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); ubsec_dma_free(sc, &rp->rpr_msgout); } free(rp, M_DEVBUF); } krp->krp_status = err; crypto_kdone(krp); return (0); } #ifdef UBSEC_DEBUG static void ubsec_dump_pb(volatile struct ubsec_pktbuf *pb) { printf("addr 0x%x (0x%x) next 0x%x\n", pb->pb_addr, pb->pb_len, pb->pb_next); } static void ubsec_dump_ctx2(struct ubsec_ctx_keyop *c) { printf("CTX (0x%x):\n", c->ctx_len); switch (letoh16(c->ctx_op)) { case UBS_CTXOP_RNGBYPASS: case UBS_CTXOP_RNGSHA1: break; case UBS_CTXOP_MODEXP: { struct ubsec_ctx_modexp *cx = (void *)c; int i, len; printf(" Elen %u, Nlen %u\n", letoh16(cx->me_E_len), letoh16(cx->me_N_len)); len = (cx->me_N_len + 7)/8; for (i = 0; i < len; i++) printf("%s%02x", (i == 0) ? 
" N: " : ":", cx->me_N[i]); printf("\n"); break; } default: printf("unknown context: %x\n", c->ctx_op); } printf("END CTX\n"); } static void ubsec_dump_mcr(struct ubsec_mcr *mcr) { volatile struct ubsec_mcr_add *ma; int i; printf("MCR:\n"); printf(" pkts: %u, flags 0x%x\n", letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags)); ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp; for (i = 0; i < letoh16(mcr->mcr_pkts); i++) { printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i, letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen), letoh16(ma->mcr_reserved)); printf(" %d: ipkt ", i); ubsec_dump_pb(&ma->mcr_ipktbuf); printf(" %d: opkt ", i); ubsec_dump_pb(&ma->mcr_opktbuf); ma++; } printf("END MCR\n"); } #endif /* UBSEC_DEBUG */ /* * Return the number of significant bits of a big number. */ static int ubsec_ksigbits(struct crparam *cr) { u_int plen = (cr->crp_nbits + 7) / 8; int i, sig = plen * 8; u_int8_t c, *p = cr->crp_p; for (i = plen - 1; i >= 0; i--) { c = p[i]; if (c != 0) { while ((c & 0x80) == 0) { sig--; c <<= 1; } break; } sig -= 8; } return (sig); } static void ubsec_kshift_r( u_int shiftbits, u_int8_t *src, u_int srcbits, u_int8_t *dst, u_int dstbits) { u_int slen, dlen; int i, si, di, n; slen = (srcbits + 7) / 8; dlen = (dstbits + 7) / 8; for (i = 0; i < slen; i++) dst[i] = src[i]; for (i = 0; i < dlen - slen; i++) dst[slen + i] = 0; n = shiftbits / 8; if (n != 0) { si = dlen - n - 1; di = dlen - 1; while (si >= 0) dst[di--] = dst[si--]; while (di >= 0) dst[di--] = 0; } n = shiftbits % 8; if (n != 0) { for (i = dlen - 1; i > 0; i--) dst[i] = (dst[i] << n) | (dst[i - 1] >> (8 - n)); dst[0] = dst[0] << n; } } static void ubsec_kshift_l( u_int shiftbits, u_int8_t *src, u_int srcbits, u_int8_t *dst, u_int dstbits) { int slen, dlen, i, n; slen = (srcbits + 7) / 8; dlen = (dstbits + 7) / 8; n = shiftbits / 8; for (i = 0; i < slen; i++) dst[i] = src[i + n]; for (i = 0; i < dlen - slen; i++) dst[slen + i] = 0; n = shiftbits % 8; if (n != 0) { for (i = 0; i < (dlen - 1); i++) dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n)); dst[dlen - 1] = dst[dlen - 1] >> n; } } Index: head/sys/dev/ubsec/ubsecvar.h =================================================================== --- head/sys/dev/ubsec/ubsecvar.h (revision 336438) +++ head/sys/dev/ubsec/ubsecvar.h (revision 336439) @@ -1,262 +1,254 @@ /* $FreeBSD$ */ /* $OpenBSD: ubsecvar.h,v 1.35 2002/09/24 18:33:26 jason Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2000 Theo de Raadt * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. * */ /* Maximum queue length */ #ifndef UBS_MAX_NQUEUE #define UBS_MAX_NQUEUE 60 #endif #define UBS_MAX_SCATTER 64 /* Maximum scatter/gather depth */ #ifndef UBS_MAX_AGGR #define UBS_MAX_AGGR 5 /* Maximum aggregation count */ #endif -#define UBSEC_CARD(sid) (((sid) & 0xf0000000) >> 28) -#define UBSEC_SESSION(sid) ( (sid) & 0x0fffffff) -#define UBSEC_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff)) - #define UBS_DEF_RTY 0xff /* PCI Retry Timeout */ #define UBS_DEF_TOUT 0xff /* PCI TRDY Timeout */ #define UBS_DEF_CACHELINE 0x01 /* Cache Line setting */ #ifdef _KERNEL struct ubsec_dma_alloc { u_int32_t dma_paddr; caddr_t dma_vaddr; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; bus_dma_segment_t dma_seg; bus_size_t dma_size; int dma_nseg; }; struct ubsec_q2 { SIMPLEQ_ENTRY(ubsec_q2) q_next; struct ubsec_dma_alloc q_mcr; struct ubsec_dma_alloc q_ctx; u_int q_type; }; struct ubsec_q2_rng { struct ubsec_q2 rng_q; struct ubsec_dma_alloc rng_buf; int rng_used; }; /* C = (M ^ E) mod N */ #define UBS_MODEXP_PAR_M 0 #define UBS_MODEXP_PAR_E 1 #define UBS_MODEXP_PAR_N 2 #define UBS_MODEXP_PAR_C 3 struct ubsec_q2_modexp { struct ubsec_q2 me_q; struct cryptkop * me_krp; struct ubsec_dma_alloc me_M; struct ubsec_dma_alloc me_E; struct ubsec_dma_alloc me_C; struct ubsec_dma_alloc me_epb; int me_modbits; int me_shiftbits; int me_normbits; }; #define UBS_RSAPRIV_PAR_P 0 #define UBS_RSAPRIV_PAR_Q 1 #define UBS_RSAPRIV_PAR_DP 2 #define UBS_RSAPRIV_PAR_DQ 3 #define UBS_RSAPRIV_PAR_PINV 4 #define UBS_RSAPRIV_PAR_MSGIN 5 #define UBS_RSAPRIV_PAR_MSGOUT 6 struct ubsec_q2_rsapriv { struct ubsec_q2 rpr_q; struct cryptkop * rpr_krp; struct ubsec_dma_alloc rpr_msgin; struct ubsec_dma_alloc rpr_msgout; }; #define UBSEC_RNG_BUFSIZ 16 /* measured in 32bit words */ struct ubsec_dmachunk { struct ubsec_mcr d_mcr; struct ubsec_mcr_add d_mcradd[UBS_MAX_AGGR-1]; struct ubsec_pktbuf d_sbuf[UBS_MAX_SCATTER-1]; struct ubsec_pktbuf d_dbuf[UBS_MAX_SCATTER-1]; u_int32_t d_macbuf[5]; union { struct ubsec_pktctx_long ctxl; struct ubsec_pktctx ctx; } d_ctx; }; struct ubsec_dma { SIMPLEQ_ENTRY(ubsec_dma) d_next; struct ubsec_dmachunk *d_dma; struct ubsec_dma_alloc d_alloc; }; #define UBS_FLAGS_KEY 0x01 /* has key accelerator */ #define UBS_FLAGS_LONGCTX 0x02 /* uses long ipsec ctx */ #define UBS_FLAGS_BIGKEY 0x04 /* 2048bit keys */ #define UBS_FLAGS_HWNORM 0x08 /* hardware normalization */ #define UBS_FLAGS_RNG 0x10 /* hardware rng */ struct ubsec_operand { union { struct mbuf *m; struct uio *io; } u; bus_dmamap_t map; bus_size_t mapsize; int nsegs; bus_dma_segment_t segs[UBS_MAX_SCATTER]; }; struct ubsec_q { SIMPLEQ_ENTRY(ubsec_q) q_next; int q_nstacked_mcrs; struct ubsec_q *q_stacked_mcr[UBS_MAX_AGGR-1]; struct cryptop *q_crp; struct ubsec_dma *q_dma; struct ubsec_operand q_src; struct ubsec_operand q_dst; - int q_sesn; int q_flags; }; 
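The three UBSEC_CARD/UBSEC_SESSION/UBSEC_SID macros deleted above are the old scheme in which the driver packed a card index into the top four bits of a 32-bit session id and a per-card session number into the low 28 bits; with the move to opaque session handles that packing, together with the q_sesn index dropped from struct ubsec_q above, becomes unnecessary. A small standalone sketch (plain userland C, not driver code) of what the removed macros did:

#include <stdint.h>
#include <stdio.h>

/* The exact macros removed from ubsecvar.h above. */
#define UBSEC_CARD(sid)		(((sid) & 0xf0000000) >> 28)
#define UBSEC_SESSION(sid)	( (sid) & 0x0fffffff)
#define UBSEC_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))

int
main(void)
{
	uint32_t sid = UBSEC_SID(3, 17);	/* card 3, session 17 */

	/* Prints: sid=0x30000011 card=3 session=17 */
	printf("sid=0x%08x card=%u session=%u\n", (unsigned)sid,
	    (unsigned)UBSEC_CARD(sid), (unsigned)UBSEC_SESSION(sid));
	return (0);
}

Once the framework hands out opaque handles, the driver no longer has to recover a session index from the id, which is why the per-driver session bookkeeping is being removed throughout this header.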
#define q_src_m q_src.u.m #define q_src_io q_src.u.io #define q_src_map q_src.map #define q_src_nsegs q_src.nsegs #define q_src_segs q_src.segs #define q_src_mapsize q_src.mapsize #define q_dst_m q_dst.u.m #define q_dst_io q_dst.u.io #define q_dst_map q_dst.map #define q_dst_nsegs q_dst.nsegs #define q_dst_segs q_dst.segs #define q_dst_mapsize q_dst.mapsize struct rndstate_test; struct ubsec_softc { device_t sc_dev; /* device backpointer */ struct resource *sc_irq; void *sc_ih; /* interrupt handler cookie */ bus_space_handle_t sc_sh; /* memory handle */ bus_space_tag_t sc_st; /* memory tag */ struct resource *sc_sr; /* memory resource */ bus_dma_tag_t sc_dmat; /* dma tag */ int sc_flags; /* device specific flags */ int sc_suspended; int sc_needwakeup; /* notify crypto layer */ u_int32_t sc_statmask; /* interrupt status mask */ int32_t sc_cid; /* crypto tag */ struct mtx sc_mcr1lock; /* mcr1 operation lock */ SIMPLEQ_HEAD(,ubsec_q) sc_queue; /* packet queue, mcr1 */ int sc_nqueue; /* count enqueued, mcr1 */ SIMPLEQ_HEAD(,ubsec_q) sc_qchip; /* on chip, mcr1 */ int sc_nqchip; /* count on chip, mcr1 */ struct mtx sc_freeqlock; /* freequeue lock */ SIMPLEQ_HEAD(,ubsec_q) sc_freequeue; /* list of free queue elements */ struct mtx sc_mcr2lock; /* mcr2 operation lock */ SIMPLEQ_HEAD(,ubsec_q2) sc_queue2; /* packet queue, mcr2 */ int sc_nqueue2; /* count enqueued, mcr2 */ SIMPLEQ_HEAD(,ubsec_q2) sc_qchip2; /* on chip, mcr2 */ - int sc_nsessions; /* # of sessions */ - struct ubsec_session *sc_sessions; /* sessions */ struct callout sc_rngto; /* rng timeout */ int sc_rnghz; /* rng poll time */ struct ubsec_q2_rng sc_rng; struct rndtest_state *sc_rndtest; /* RNG test state */ void (*sc_harvest)(struct rndtest_state *, void *, u_int); struct ubsec_dma sc_dmaa[UBS_MAX_NQUEUE]; struct ubsec_q *sc_queuea[UBS_MAX_NQUEUE]; SIMPLEQ_HEAD(,ubsec_q2) sc_q2free; /* free list */ }; #define UBSEC_QFLAGS_COPYOUTIV 0x1 struct ubsec_session { - u_int32_t ses_used; u_int32_t ses_deskey[6]; /* 3DES key */ u_int32_t ses_mlen; /* hmac length */ u_int32_t ses_hminner[5]; /* hmac inner state */ u_int32_t ses_hmouter[5]; /* hmac outer state */ u_int32_t ses_iv[2]; /* [3]DES iv */ }; #endif /* _KERNEL */ struct ubsec_stats { u_int64_t hst_ibytes; u_int64_t hst_obytes; u_int32_t hst_ipackets; u_int32_t hst_opackets; u_int32_t hst_invalid; /* invalid argument */ u_int32_t hst_badsession; /* invalid session id */ u_int32_t hst_badflags; /* flags indicate !(mbuf | uio) */ u_int32_t hst_nodesc; /* op submitted w/o descriptors */ u_int32_t hst_badalg; /* unsupported algorithm */ u_int32_t hst_nomem; u_int32_t hst_queuefull; u_int32_t hst_dmaerr; u_int32_t hst_mcrerr; u_int32_t hst_nodmafree; u_int32_t hst_lenmismatch; /* enc/auth lengths different */ u_int32_t hst_skipmismatch; /* enc part begins before auth part */ u_int32_t hst_iovmisaligned; /* iov op not aligned */ u_int32_t hst_noirq; /* IRQ for no reason */ u_int32_t hst_unaligned; /* unaligned src caused copy */ u_int32_t hst_nomap; /* bus_dmamap_create failed */ u_int32_t hst_noload; /* bus_dmamap_load_* failed */ u_int32_t hst_nombuf; /* MGET* failed */ u_int32_t hst_nomcl; /* MCLGET* failed */ u_int32_t hst_totbatch; /* ops submitted w/o interrupt */ u_int32_t hst_maxbatch; /* max ops submitted together */ u_int32_t hst_maxqueue; /* max ops queued for submission */ u_int32_t hst_maxqchip; /* max mcr1 ops out for processing */ u_int32_t hst_mcr1full; /* MCR1 too busy to take ops */ u_int32_t hst_rng; /* RNG requests */ u_int32_t hst_modexp; /* MOD EXP requests */ 
u_int32_t hst_modexpcrt; /* MOD EXP CRT requests */ }; Index: head/sys/geom/eli/g_eli.c =================================================================== --- head/sys/geom/eli/g_eli.c (revision 336438) +++ head/sys/geom/eli/g_eli.c (revision 336439) @@ -1,1338 +1,1338 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005-2011 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include FEATURE(geom_eli, "GEOM crypto module"); MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data"); SYSCTL_DECL(_kern_geom); SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff"); static int g_eli_version = G_ELI_VERSION; SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0, "GELI version"); int g_eli_debug = 0; SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RWTUN, &g_eli_debug, 0, "Debug level"); static u_int g_eli_tries = 3; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RWTUN, &g_eli_tries, 0, "Number of tries for entering the passphrase"); static u_int g_eli_visible_passphrase = GETS_NOECHO; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RWTUN, &g_eli_visible_passphrase, 0, "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)"); u_int g_eli_overwrites = G_ELI_OVERWRITES; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RWTUN, &g_eli_overwrites, 0, "Number of times on-disk keys should be overwritten when destroying them"); static u_int g_eli_threads = 0; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RWTUN, &g_eli_threads, 0, "Number of threads doing crypto work"); u_int g_eli_batch = 0; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RWTUN, &g_eli_batch, 0, "Use crypto operations batching"); /* * Passphrase cached during boot, in order to be more user-friendly if * there are multiple providers using the same passphrase. 
*/ static char cached_passphrase[256]; static u_int g_eli_boot_passcache = 1; TUNABLE_INT("kern.geom.eli.boot_passcache", &g_eli_boot_passcache); SYSCTL_UINT(_kern_geom_eli, OID_AUTO, boot_passcache, CTLFLAG_RD, &g_eli_boot_passcache, 0, "Passphrases are cached during boot process for possible reuse"); static void fetch_loader_passphrase(void * dummy) { char * env_passphrase; KASSERT(dynamic_kenv, ("need dynamic kenv")); if ((env_passphrase = kern_getenv("kern.geom.eli.passphrase")) != NULL) { /* Extract passphrase from the environment. */ strlcpy(cached_passphrase, env_passphrase, sizeof(cached_passphrase)); freeenv(env_passphrase); /* Wipe the passphrase from the environment. */ kern_unsetenv("kern.geom.eli.passphrase"); } } SYSINIT(geli_fetch_loader_passphrase, SI_SUB_KMEM + 1, SI_ORDER_ANY, fetch_loader_passphrase, NULL); static void zero_boot_passcache(void) { explicit_bzero(cached_passphrase, sizeof(cached_passphrase)); } static void zero_geli_intake_keys(void) { struct keybuf *keybuf; int i; if ((keybuf = get_keybuf()) != NULL) { /* Scan the key buffer, clear all GELI keys. */ for (i = 0; i < keybuf->kb_nents; i++) { if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) { explicit_bzero(keybuf->kb_ents[i].ke_data, sizeof(keybuf->kb_ents[i].ke_data)); keybuf->kb_ents[i].ke_type = KEYBUF_TYPE_NONE; } } } } static void zero_intake_passcache(void *dummy) { zero_boot_passcache(); zero_geli_intake_keys(); } EVENTHANDLER_DEFINE(mountroot, zero_intake_passcache, NULL, 0); static eventhandler_tag g_eli_pre_sync = NULL; static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp); static void g_eli_init(struct g_class *mp); static void g_eli_fini(struct g_class *mp); static g_taste_t g_eli_taste; static g_dumpconf_t g_eli_dumpconf; struct g_class g_eli_class = { .name = G_ELI_CLASS_NAME, .version = G_VERSION, .ctlreq = g_eli_config, .taste = g_eli_taste, .destroy_geom = g_eli_destroy_geom, .init = g_eli_init, .fini = g_eli_fini }; /* * Code paths: * BIO_READ: * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ /* * EAGAIN from crypto(9) means, that we were probably balanced to another crypto * accelerator or something like this. * The function updates the SID and rerun the operation. */ int g_eli_crypto_rerun(struct cryptop *crp) { struct g_eli_softc *sc; struct g_eli_worker *wr; struct bio *bp; int error; bp = (struct bio *)crp->crp_opaque; sc = bp->bio_to->geom->softc; LIST_FOREACH(wr, &sc->sc_workers, w_next) { if (wr->w_number == bp->bio_pflags) break; } KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags)); - G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).", - bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid, - (uintmax_t)crp->crp_sid); - wr->w_sid = crp->crp_sid; + G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %p -> %p).", + bp->bio_cmd == BIO_READ ? 
"READ" : "WRITE", wr->w_sid, + crp->crp_session); + wr->w_sid = crp->crp_session; crp->crp_etype = 0; error = crypto_dispatch(crp); if (error == 0) return (0); G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error); crp->crp_etype = error; return (error); } static void g_eli_getattr_done(struct bio *bp) { if (bp->bio_error == 0 && !strcmp(bp->bio_attribute, "GEOM::physpath")) { strlcat(bp->bio_data, "/eli", bp->bio_length); } g_std_done(bp); } /* * The function is called afer reading encrypted data from the provider. * * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver */ void g_eli_read_done(struct bio *bp) { struct g_eli_softc *sc; struct bio *pbp; G_ELI_LOGREQ(2, bp, "Request done."); pbp = bp->bio_parent; if (pbp->bio_error == 0 && bp->bio_error != 0) pbp->bio_error = bp->bio_error; g_destroy_bio(bp); /* * Do we have all sectors already? */ pbp->bio_inbed++; if (pbp->bio_inbed < pbp->bio_children) return; sc = pbp->bio_to->geom->softc; if (pbp->bio_error != 0) { G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__, pbp->bio_error); pbp->bio_completed = 0; if (pbp->bio_driver2 != NULL) { free(pbp->bio_driver2, M_ELI); pbp->bio_driver2 = NULL; } g_io_deliver(pbp, pbp->bio_error); if (sc != NULL) atomic_subtract_int(&sc->sc_inflight, 1); return; } mtx_lock(&sc->sc_queue_mtx); bioq_insert_tail(&sc->sc_queue, pbp); mtx_unlock(&sc->sc_queue_mtx); wakeup(sc); } /* * The function is called after we encrypt and write data. * * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver */ void g_eli_write_done(struct bio *bp) { struct g_eli_softc *sc; struct bio *pbp; G_ELI_LOGREQ(2, bp, "Request done."); pbp = bp->bio_parent; if (pbp->bio_error == 0 && bp->bio_error != 0) pbp->bio_error = bp->bio_error; g_destroy_bio(bp); /* * Do we have all sectors already? */ pbp->bio_inbed++; if (pbp->bio_inbed < pbp->bio_children) return; free(pbp->bio_driver2, M_ELI); pbp->bio_driver2 = NULL; if (pbp->bio_error != 0) { G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__, pbp->bio_error); pbp->bio_completed = 0; } else pbp->bio_completed = pbp->bio_length; /* * Write is finished, send it up. */ sc = pbp->bio_to->geom->softc; g_io_deliver(pbp, pbp->bio_error); if (sc != NULL) atomic_subtract_int(&sc->sc_inflight, 1); } /* * This function should never be called, but GEOM made as it set ->orphan() * method for every geom. 
*/ static void g_eli_orphan_spoil_assert(struct g_consumer *cp) { panic("Function %s() called for %s.", __func__, cp->geom->name); } static void g_eli_orphan(struct g_consumer *cp) { struct g_eli_softc *sc; g_topology_assert(); sc = cp->geom->softc; if (sc == NULL) return; g_eli_destroy(sc, TRUE); } /* * BIO_READ: * G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver * BIO_WRITE: * G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ static void g_eli_start(struct bio *bp) { struct g_eli_softc *sc; struct g_consumer *cp; struct bio *cbp; sc = bp->bio_to->geom->softc; KASSERT(sc != NULL, ("Provider's error should be set (error=%d)(device=%s).", bp->bio_to->error, bp->bio_to->name)); G_ELI_LOGREQ(2, bp, "Request received."); switch (bp->bio_cmd) { case BIO_READ: case BIO_WRITE: case BIO_GETATTR: case BIO_FLUSH: case BIO_ZONE: break; case BIO_DELETE: /* * If the user hasn't set the NODELETE flag, we just pass * it down the stack and let the layers beneath us do (or * not) whatever they do with it. If they have, we * reject it. A possible extension would be an * additional flag to take it as a hint to shred the data * with [multiple?] overwrites. */ if (!(sc->sc_flags & G_ELI_FLAG_NODELETE)) break; default: g_io_deliver(bp, EOPNOTSUPP); return; } cbp = g_clone_bio(bp); if (cbp == NULL) { g_io_deliver(bp, ENOMEM); return; } bp->bio_driver1 = cbp; bp->bio_pflags = G_ELI_NEW_BIO; switch (bp->bio_cmd) { case BIO_READ: if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) { g_eli_crypto_read(sc, bp, 0); break; } /* FALLTHROUGH */ case BIO_WRITE: mtx_lock(&sc->sc_queue_mtx); bioq_insert_tail(&sc->sc_queue, bp); mtx_unlock(&sc->sc_queue_mtx); wakeup(sc); break; case BIO_GETATTR: case BIO_FLUSH: case BIO_DELETE: case BIO_ZONE: if (bp->bio_cmd == BIO_GETATTR) cbp->bio_done = g_eli_getattr_done; else cbp->bio_done = g_std_done; cp = LIST_FIRST(&sc->sc_geom->consumer); cbp->bio_to = cp->provider; G_ELI_LOGREQ(2, cbp, "Sending request."); g_io_request(cbp, cp); break; } } static int g_eli_newsession(struct g_eli_worker *wr) { struct g_eli_softc *sc; struct cryptoini crie, cria; int error; sc = wr->w_softc; bzero(&crie, sizeof(crie)); crie.cri_alg = sc->sc_ealgo; crie.cri_klen = sc->sc_ekeylen; if (sc->sc_ealgo == CRYPTO_AES_XTS) crie.cri_klen <<= 1; if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) { crie.cri_key = g_eli_key_hold(sc, 0, LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize); } else { crie.cri_key = sc->sc_ekey; } if (sc->sc_flags & G_ELI_FLAG_AUTH) { bzero(&cria, sizeof(cria)); cria.cri_alg = sc->sc_aalgo; cria.cri_klen = sc->sc_akeylen; cria.cri_key = sc->sc_akey; crie.cri_next = &cria; } switch (sc->sc_crypto) { case G_ELI_CRYPTO_SW: error = crypto_newsession(&wr->w_sid, &crie, CRYPTOCAP_F_SOFTWARE); break; case G_ELI_CRYPTO_HW: error = crypto_newsession(&wr->w_sid, &crie, CRYPTOCAP_F_HARDWARE); break; case G_ELI_CRYPTO_UNKNOWN: error = crypto_newsession(&wr->w_sid, &crie, CRYPTOCAP_F_HARDWARE); if (error == 0) { mtx_lock(&sc->sc_queue_mtx); if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN) sc->sc_crypto = G_ELI_CRYPTO_HW; mtx_unlock(&sc->sc_queue_mtx); } else { error = crypto_newsession(&wr->w_sid, &crie, CRYPTOCAP_F_SOFTWARE); mtx_lock(&sc->sc_queue_mtx); if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN) sc->sc_crypto = G_ELI_CRYPTO_SW; mtx_unlock(&sc->sc_queue_mtx); } break; default: panic("%s: invalid condition", __func__); } if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) 
g_eli_key_drop(sc, crie.cri_key); return (error); } static void g_eli_freesession(struct g_eli_worker *wr) { crypto_freesession(wr->w_sid); } static void g_eli_cancel(struct g_eli_softc *sc) { struct bio *bp; mtx_assert(&sc->sc_queue_mtx, MA_OWNED); while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) { KASSERT(bp->bio_pflags == G_ELI_NEW_BIO, ("Not new bio when canceling (bp=%p).", bp)); g_io_deliver(bp, ENXIO); } } static struct bio * g_eli_takefirst(struct g_eli_softc *sc) { struct bio *bp; mtx_assert(&sc->sc_queue_mtx, MA_OWNED); if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND)) return (bioq_takefirst(&sc->sc_queue)); /* * Device suspended, so we skip new I/O requests. */ TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { if (bp->bio_pflags != G_ELI_NEW_BIO) break; } if (bp != NULL) bioq_remove(&sc->sc_queue, bp); return (bp); } /* * This is the main function for kernel worker thread when we don't have * hardware acceleration and we have to do cryptography in software. * Dedicated thread is needed, so we don't slow down g_up/g_down GEOM * threads with crypto work. */ static void g_eli_worker(void *arg) { struct g_eli_softc *sc; struct g_eli_worker *wr; struct bio *bp; int error; wr = arg; sc = wr->w_softc; #ifdef EARLY_AP_STARTUP MPASS(!sc->sc_cpubind || smp_started); #elif defined(SMP) /* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */ if (sc->sc_cpubind) { while (!smp_started) tsleep(wr, 0, "geli:smp", hz / 4); } #endif thread_lock(curthread); sched_prio(curthread, PUSER); if (sc->sc_cpubind) sched_bind(curthread, wr->w_number % mp_ncpus); thread_unlock(curthread); G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm); for (;;) { mtx_lock(&sc->sc_queue_mtx); again: bp = g_eli_takefirst(sc); if (bp == NULL) { if (sc->sc_flags & G_ELI_FLAG_DESTROY) { g_eli_cancel(sc); LIST_REMOVE(wr, w_next); g_eli_freesession(wr); free(wr, M_ELI); G_ELI_DEBUG(1, "Thread %s exiting.", curthread->td_proc->p_comm); wakeup(&sc->sc_workers); mtx_unlock(&sc->sc_queue_mtx); kproc_exit(0); } while (sc->sc_flags & G_ELI_FLAG_SUSPEND) { if (sc->sc_inflight > 0) { G_ELI_DEBUG(0, "inflight=%d", sc->sc_inflight); /* * We still have inflight BIOs, so * sleep and retry. */ msleep(sc, &sc->sc_queue_mtx, PRIBIO, "geli:inf", hz / 5); goto again; } /* * Suspend requested, mark the worker as * suspended and go to sleep. 
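g_eli_newsession() and g_eli_freesession() above show the calling convention after this change: the consumer owns a crypto_session_t and passes its address to crypto_newsession(). A trimmed sketch of the same sequence for a single cipher, with hypothetical function names and without geli's G_ELI_CRYPTO_UNKNOWN probing logic:

#include <sys/param.h>
#include <sys/systm.h>
#include <opencrypto/cryptodev.h>

static int
my_setup_session(crypto_session_t *sidp, const void *key, int klen_bits)
{
	struct cryptoini cri;
	int error;

	bzero(&cri, sizeof(cri));
	cri.cri_alg = CRYPTO_AES_CBC;
	cri.cri_klen = klen_bits;		/* key length in bits */
	cri.cri_key = __DECONST(void *, key);

	/* Prefer hardware and fall back to software, as g_eli does above. */
	error = crypto_newsession(sidp, &cri, CRYPTOCAP_F_HARDWARE);
	if (error != 0)
		error = crypto_newsession(sidp, &cri, CRYPTOCAP_F_SOFTWARE);
	return (error);
}

static void
my_teardown_session(crypto_session_t sid)
{
	crypto_freesession(sid);
}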
*/ if (wr->w_active) { g_eli_freesession(wr); wr->w_active = FALSE; } wakeup(&sc->sc_workers); msleep(sc, &sc->sc_queue_mtx, PRIBIO, "geli:suspend", 0); if (!wr->w_active && !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) { error = g_eli_newsession(wr); KASSERT(error == 0, ("g_eli_newsession() failed on resume (error=%d)", error)); wr->w_active = TRUE; } goto again; } msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0); continue; } if (bp->bio_pflags == G_ELI_NEW_BIO) atomic_add_int(&sc->sc_inflight, 1); mtx_unlock(&sc->sc_queue_mtx); if (bp->bio_pflags == G_ELI_NEW_BIO) { bp->bio_pflags = 0; if (sc->sc_flags & G_ELI_FLAG_AUTH) { if (bp->bio_cmd == BIO_READ) g_eli_auth_read(sc, bp); else g_eli_auth_run(wr, bp); } else { if (bp->bio_cmd == BIO_READ) g_eli_crypto_read(sc, bp, 1); else g_eli_crypto_run(wr, bp); } } else { if (sc->sc_flags & G_ELI_FLAG_AUTH) g_eli_auth_run(wr, bp); else g_eli_crypto_run(wr, bp); } } } int g_eli_read_metadata(struct g_class *mp, struct g_provider *pp, struct g_eli_metadata *md) { struct g_geom *gp; struct g_consumer *cp; u_char *buf = NULL; int error; g_topology_assert(); gp = g_new_geomf(mp, "eli:taste"); gp->start = g_eli_start; gp->access = g_std_access; /* * g_eli_read_metadata() is always called from the event thread. * Our geom is created and destroyed in the same event, so there * could be no orphan nor spoil event in the meantime. */ gp->orphan = g_eli_orphan_spoil_assert; gp->spoiled = g_eli_orphan_spoil_assert; cp = g_new_consumer(gp); error = g_attach(cp, pp); if (error != 0) goto end; error = g_access(cp, 1, 0, 0); if (error != 0) goto end; g_topology_unlock(); buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, &error); g_topology_lock(); if (buf == NULL) goto end; error = eli_metadata_decode(buf, md); if (error != 0) goto end; /* Metadata was read and decoded successfully. */ end: if (buf != NULL) g_free(buf); if (cp->provider != NULL) { if (cp->acr == 1) g_access(cp, -1, 0, 0); g_detach(cp); } g_destroy_consumer(cp); g_destroy_geom(gp); return (error); } /* * The function is called when we had last close on provider and user requested * to close it when this situation occur. */ static void g_eli_last_close(void *arg, int flags __unused) { struct g_geom *gp; char gpname[64]; int error; g_topology_assert(); gp = arg; strlcpy(gpname, gp->name, sizeof(gpname)); error = g_eli_destroy(gp->softc, TRUE); KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).", gpname, error)); G_ELI_DEBUG(0, "Detached %s on last close.", gpname); } int g_eli_access(struct g_provider *pp, int dr, int dw, int de) { struct g_eli_softc *sc; struct g_geom *gp; gp = pp->geom; sc = gp->softc; if (dw > 0) { if (sc->sc_flags & G_ELI_FLAG_RO) { /* Deny write attempts. */ return (EROFS); } /* Someone is opening us for write, we need to remember that. */ sc->sc_flags |= G_ELI_FLAG_WOPEN; return (0); } /* Is this the last close? */ if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0) return (0); /* * Automatically detach on last close if requested. 
*/ if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) || (sc->sc_flags & G_ELI_FLAG_WOPEN)) { g_post_event(g_eli_last_close, gp, M_WAITOK, NULL); } return (0); } static int g_eli_cpu_is_disabled(int cpu) { #ifdef SMP return (CPU_ISSET(cpu, &hlt_cpus_mask)); #else return (0); #endif } struct g_geom * g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp, const struct g_eli_metadata *md, const u_char *mkey, int nkey) { struct g_eli_softc *sc; struct g_eli_worker *wr; struct g_geom *gp; struct g_provider *pp; struct g_consumer *cp; u_int i, threads; int error; G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX); gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX); sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO); gp->start = g_eli_start; /* * Spoiling can happen even though we have the provider open * exclusively, e.g. through media change events. */ gp->spoiled = g_eli_orphan; gp->orphan = g_eli_orphan; gp->dumpconf = g_eli_dumpconf; /* * If detach-on-last-close feature is not enabled and we don't operate * on read-only provider, we can simply use g_std_access(). */ if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO)) gp->access = g_eli_access; else gp->access = g_std_access; eli_metadata_softc(sc, md, bpp->sectorsize, bpp->mediasize); sc->sc_nkey = nkey; gp->softc = sc; sc->sc_geom = gp; bioq_init(&sc->sc_queue); mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF); mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF); pp = NULL; cp = g_new_consumer(gp); error = g_attach(cp, bpp); if (error != 0) { if (req != NULL) { gctl_error(req, "Cannot attach to %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).", bpp->name, error); } goto failed; } /* * Keep provider open all the time, so we can run critical tasks, * like Master Keys deletion, without wondering if we can open * provider or not. * We don't open provider for writing only when user requested read-only * access. */ if (sc->sc_flags & G_ELI_FLAG_RO) error = g_access(cp, 1, 0, 1); else error = g_access(cp, 1, 1, 1); if (error != 0) { if (req != NULL) { gctl_error(req, "Cannot access %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot access %s (error=%d).", bpp->name, error); } goto failed; } /* * Remember the keys in our softc structure. */ g_eli_mkey_propagate(sc, mkey); LIST_INIT(&sc->sc_workers); threads = g_eli_threads; if (threads == 0) threads = mp_ncpus; sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus); for (i = 0; i < threads; i++) { if (g_eli_cpu_is_disabled(i)) { G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.", bpp->name, i); continue; } wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO); wr->w_softc = sc; wr->w_number = i; wr->w_active = TRUE; error = g_eli_newsession(wr); if (error != 0) { free(wr, M_ELI); if (req != NULL) { gctl_error(req, "Cannot set up crypto session " "for %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot set up crypto session " "for %s (error=%d).", bpp->name, error); } goto failed; } error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0, "g_eli[%u] %s", i, bpp->name); if (error != 0) { g_eli_freesession(wr); free(wr, M_ELI); if (req != NULL) { gctl_error(req, "Cannot create kernel thread " "for %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot create kernel thread " "for %s (error=%d).", bpp->name, error); } goto failed; } LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next); } /* * Create decrypted provider. 
*/ pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX); pp->mediasize = sc->sc_mediasize; pp->sectorsize = sc->sc_sectorsize; g_error_provider(pp, 0); G_ELI_DEBUG(0, "Device %s created.", pp->name); G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo), sc->sc_ekeylen); if (sc->sc_flags & G_ELI_FLAG_AUTH) G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo)); G_ELI_DEBUG(0, " Crypto: %s", sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware"); return (gp); failed: mtx_lock(&sc->sc_queue_mtx); sc->sc_flags |= G_ELI_FLAG_DESTROY; wakeup(sc); /* * Wait for kernel threads self destruction. */ while (!LIST_EMPTY(&sc->sc_workers)) { msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO, "geli:destroy", 0); } mtx_destroy(&sc->sc_queue_mtx); if (cp->provider != NULL) { if (cp->acr == 1) g_access(cp, -1, -1, -1); g_detach(cp); } g_destroy_consumer(cp); g_destroy_geom(gp); g_eli_key_destroy(sc); bzero(sc, sizeof(*sc)); free(sc, M_ELI); return (NULL); } int g_eli_destroy(struct g_eli_softc *sc, boolean_t force) { struct g_geom *gp; struct g_provider *pp; g_topology_assert(); if (sc == NULL) return (ENXIO); gp = sc->sc_geom; pp = LIST_FIRST(&gp->provider); if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { if (force) { G_ELI_DEBUG(1, "Device %s is still open, so it " "cannot be definitely removed.", pp->name); sc->sc_flags |= G_ELI_FLAG_RW_DETACH; gp->access = g_eli_access; g_wither_provider(pp, ENXIO); return (EBUSY); } else { G_ELI_DEBUG(1, "Device %s is still open (r%dw%de%d).", pp->name, pp->acr, pp->acw, pp->ace); return (EBUSY); } } mtx_lock(&sc->sc_queue_mtx); sc->sc_flags |= G_ELI_FLAG_DESTROY; wakeup(sc); while (!LIST_EMPTY(&sc->sc_workers)) { msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO, "geli:destroy", 0); } mtx_destroy(&sc->sc_queue_mtx); gp->softc = NULL; g_eli_key_destroy(sc); bzero(sc, sizeof(*sc)); free(sc, M_ELI); if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)) G_ELI_DEBUG(0, "Device %s destroyed.", gp->name); g_wither_geom_close(gp, ENXIO); return (0); } static int g_eli_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused, struct g_geom *gp) { struct g_eli_softc *sc; sc = gp->softc; return (g_eli_destroy(sc, FALSE)); } static int g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider) { u_char *keyfile, *data; char *file, name[64]; size_t size; int i; for (i = 0; ; i++) { snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i); keyfile = preload_search_by_type(name); if (keyfile == NULL && i == 0) { /* * If there is only one keyfile, allow simpler name. */ snprintf(name, sizeof(name), "%s:geli_keyfile", provider); keyfile = preload_search_by_type(name); } if (keyfile == NULL) return (i); /* Return number of loaded keyfiles. 
*/ data = preload_fetch_addr(keyfile); if (data == NULL) { G_ELI_DEBUG(0, "Cannot find key file data for %s.", name); return (0); } size = preload_fetch_size(keyfile); if (size == 0) { G_ELI_DEBUG(0, "Cannot find key file size for %s.", name); return (0); } file = preload_search_info(keyfile, MODINFO_NAME); if (file == NULL) { G_ELI_DEBUG(0, "Cannot find key file name for %s.", name); return (0); } G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file, provider, name); g_eli_crypto_hmac_update(ctx, data, size); } } static void g_eli_keyfiles_clear(const char *provider) { u_char *keyfile, *data; char name[64]; size_t size; int i; for (i = 0; ; i++) { snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i); keyfile = preload_search_by_type(name); if (keyfile == NULL) return; data = preload_fetch_addr(keyfile); size = preload_fetch_size(keyfile); if (data != NULL && size != 0) bzero(data, size); } } /* * Tasting is only made on boot. * We detect providers which should be attached before root is mounted. */ static struct g_geom * g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) { struct g_eli_metadata md; struct g_geom *gp; struct hmac_ctx ctx; char passphrase[256]; u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN]; u_int i, nkey, nkeyfiles, tries, showpass; int error; struct keybuf *keybuf; g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); g_topology_assert(); if (root_mounted() || g_eli_tries == 0) return (NULL); G_ELI_DEBUG(3, "Tasting %s.", pp->name); error = g_eli_read_metadata(mp, pp, &md); if (error != 0) return (NULL); gp = NULL; if (strcmp(md.md_magic, G_ELI_MAGIC) != 0) return (NULL); if (md.md_version > G_ELI_VERSION) { printf("geom_eli.ko module is too old to handle %s.\n", pp->name); return (NULL); } if (md.md_provsize != pp->mediasize) return (NULL); /* Should we attach it on boot? */ if (!(md.md_flags & G_ELI_FLAG_BOOT)) return (NULL); if (md.md_keys == 0x00) { G_ELI_DEBUG(0, "No valid keys on %s.", pp->name); return (NULL); } if (md.md_iterations == -1) { /* If there is no passphrase, we try only once. */ tries = 1; } else { /* Ask for the passphrase no more than g_eli_tries times. */ tries = g_eli_tries; } if ((keybuf = get_keybuf()) != NULL) { /* Scan the key buffer, try all GELI keys. */ for (i = 0; i < keybuf->kb_nents; i++) { if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) { memcpy(key, keybuf->kb_ents[i].ke_data, sizeof(key)); if (g_eli_mkey_decrypt_any(&md, key, mkey, &nkey) == 0 ) { explicit_bzero(key, sizeof(key)); goto have_key; } } } } for (i = 0; i <= tries; i++) { g_eli_crypto_hmac_init(&ctx, NULL, 0); /* * Load all key files. */ nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name); if (nkeyfiles == 0 && md.md_iterations == -1) { /* * No key files and no passphrase, something is * definitely wrong here. * geli(8) doesn't allow for such situation, so assume * that there was really no passphrase and in that case * key files are no properly defined in loader.conf. */ G_ELI_DEBUG(0, "Found no key files in loader.conf for %s.", pp->name); return (NULL); } /* Ask for the passphrase if defined. */ if (md.md_iterations >= 0) { /* Try first with cached passphrase. 
*/ if (i == 0) { if (!g_eli_boot_passcache) continue; memcpy(passphrase, cached_passphrase, sizeof(passphrase)); } else { printf("Enter passphrase for %s: ", pp->name); showpass = g_eli_visible_passphrase; if ((md.md_flags & G_ELI_FLAG_GELIDISPLAYPASS) != 0) showpass = GETS_ECHOPASS; cngets(passphrase, sizeof(passphrase), showpass); memcpy(cached_passphrase, passphrase, sizeof(passphrase)); } } /* * Prepare Derived-Key from the user passphrase. */ if (md.md_iterations == 0) { g_eli_crypto_hmac_update(&ctx, md.md_salt, sizeof(md.md_salt)); g_eli_crypto_hmac_update(&ctx, passphrase, strlen(passphrase)); explicit_bzero(passphrase, sizeof(passphrase)); } else if (md.md_iterations > 0) { u_char dkey[G_ELI_USERKEYLEN]; pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt, sizeof(md.md_salt), passphrase, md.md_iterations); bzero(passphrase, sizeof(passphrase)); g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey)); explicit_bzero(dkey, sizeof(dkey)); } g_eli_crypto_hmac_final(&ctx, key, 0); /* * Decrypt Master-Key. */ error = g_eli_mkey_decrypt_any(&md, key, mkey, &nkey); bzero(key, sizeof(key)); if (error == -1) { if (i == tries) { G_ELI_DEBUG(0, "Wrong key for %s. No tries left.", pp->name); g_eli_keyfiles_clear(pp->name); return (NULL); } if (i > 0) { G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.", pp->name, tries - i); } /* Try again. */ continue; } else if (error > 0) { G_ELI_DEBUG(0, "Cannot decrypt Master Key for %s (error=%d).", pp->name, error); g_eli_keyfiles_clear(pp->name); return (NULL); } g_eli_keyfiles_clear(pp->name); G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name); break; } have_key: /* * We have correct key, let's attach provider. */ gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey); bzero(mkey, sizeof(mkey)); bzero(&md, sizeof(md)); if (gp == NULL) { G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name, G_ELI_SUFFIX); return (NULL); } return (gp); } static void g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp) { struct g_eli_softc *sc; g_topology_assert(); sc = gp->softc; if (sc == NULL) return; if (pp != NULL || cp != NULL) return; /* Nothing here. 
*/ sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t)sc->sc_ekeys_total); sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t)sc->sc_ekeys_allocated); sbuf_printf(sb, "%s", indent); if (sc->sc_flags == 0) sbuf_printf(sb, "NONE"); else { int first = 1; #define ADD_FLAG(flag, name) do { \ if (sc->sc_flags & (flag)) { \ if (!first) \ sbuf_printf(sb, ", "); \ else \ first = 0; \ sbuf_printf(sb, name); \ } \ } while (0) ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND"); ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY"); ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER"); ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME"); ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT"); ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH"); ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH"); ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH"); ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN"); ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY"); ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY"); ADD_FLAG(G_ELI_FLAG_NODELETE, "NODELETE"); ADD_FLAG(G_ELI_FLAG_GELIBOOT, "GELIBOOT"); ADD_FLAG(G_ELI_FLAG_GELIDISPLAYPASS, "GELIDISPLAYPASS"); #undef ADD_FLAG } sbuf_printf(sb, "\n"); if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) { sbuf_printf(sb, "%s%u\n", indent, sc->sc_nkey); } sbuf_printf(sb, "%s%u\n", indent, sc->sc_version); sbuf_printf(sb, "%s", indent); switch (sc->sc_crypto) { case G_ELI_CRYPTO_HW: sbuf_printf(sb, "hardware"); break; case G_ELI_CRYPTO_SW: sbuf_printf(sb, "software"); break; default: sbuf_printf(sb, "UNKNOWN"); break; } sbuf_printf(sb, "\n"); if (sc->sc_flags & G_ELI_FLAG_AUTH) { sbuf_printf(sb, "%s%s\n", indent, g_eli_algo2str(sc->sc_aalgo)); } sbuf_printf(sb, "%s%u\n", indent, sc->sc_ekeylen); sbuf_printf(sb, "%s%s\n", indent, g_eli_algo2str(sc->sc_ealgo)); sbuf_printf(sb, "%s%s\n", indent, (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE"); } static void g_eli_shutdown_pre_sync(void *arg, int howto) { struct g_class *mp; struct g_geom *gp, *gp2; struct g_provider *pp; struct g_eli_softc *sc; int error; mp = arg; g_topology_lock(); LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { sc = gp->softc; if (sc == NULL) continue; pp = LIST_FIRST(&gp->provider); KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name)); if (pp->acr + pp->acw + pp->ace == 0) error = g_eli_destroy(sc, TRUE); else { sc->sc_flags |= G_ELI_FLAG_RW_DETACH; gp->access = g_eli_access; } } g_topology_unlock(); } static void g_eli_init(struct g_class *mp) { g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync, g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST); if (g_eli_pre_sync == NULL) G_ELI_DEBUG(0, "Warning! Cannot register shutdown event."); } static void g_eli_fini(struct g_class *mp) { if (g_eli_pre_sync != NULL) EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync); } DECLARE_GEOM_CLASS(g_eli_class, g_eli); MODULE_DEPEND(g_eli, crypto, 1, 1, 1); MODULE_VERSION(geom_eli, 0); Index: head/sys/geom/eli/g_eli_crypto.c =================================================================== --- head/sys/geom/eli/g_eli_crypto.c (revision 336438) +++ head/sys/geom/eli/g_eli_crypto.c (revision 336439) @@ -1,225 +1,225 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005-2010 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include #include #include #else #include #include #include #include #include #include #define _OpenSSL_ #endif #include #ifdef _KERNEL MALLOC_DECLARE(M_ELI); static int g_eli_crypto_done(struct cryptop *crp) { crp->crp_opaque = (void *)crp; wakeup(crp); return (0); } static int g_eli_crypto_cipher(u_int algo, int enc, u_char *data, size_t datasize, const u_char *key, size_t keysize) { struct cryptoini cri; struct cryptop *crp; struct cryptodesc *crd; crypto_session_t sid; u_char *p; int error; KASSERT(algo != CRYPTO_AES_XTS, ("%s: CRYPTO_AES_XTS unexpected here", __func__)); bzero(&cri, sizeof(cri)); cri.cri_alg = algo; cri.cri_key = __DECONST(void *, key); cri.cri_klen = keysize; error = crypto_newsession(&sid, &cri, CRYPTOCAP_F_SOFTWARE); if (error != 0) return (error); p = malloc(sizeof(*crp) + sizeof(*crd), M_ELI, M_NOWAIT | M_ZERO); if (p == NULL) { crypto_freesession(sid); return (ENOMEM); } crp = (struct cryptop *)p; p += sizeof(*crp); crd = (struct cryptodesc *)p; p += sizeof(*crd); crd->crd_skip = 0; crd->crd_len = datasize; crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT; if (enc) crd->crd_flags |= CRD_F_ENCRYPT; crd->crd_alg = algo; crd->crd_key = __DECONST(void *, key); crd->crd_klen = keysize; bzero(crd->crd_iv, sizeof(crd->crd_iv)); crd->crd_next = NULL; - crp->crp_sid = sid; + crp->crp_session = sid; crp->crp_ilen = datasize; crp->crp_olen = datasize; crp->crp_opaque = NULL; crp->crp_callback = g_eli_crypto_done; crp->crp_buf = (void *)data; crp->crp_flags = CRYPTO_F_CBIFSYNC; crp->crp_desc = crd; error = crypto_dispatch(crp); if (error == 0) { while (crp->crp_opaque == NULL) tsleep(crp, PRIBIO, "geli", hz / 5); error = crp->crp_etype; } free(crp, M_ELI); crypto_freesession(sid); return (error); } #else /* !_KERNEL */ static int g_eli_crypto_cipher(u_int algo, int enc, u_char *data, size_t datasize, const u_char *key, size_t keysize) { EVP_CIPHER_CTX ctx; const EVP_CIPHER *type; u_char iv[keysize]; int outsize; assert(algo != CRYPTO_AES_XTS); switch (algo) { case CRYPTO_NULL_CBC: type = EVP_enc_null(); break; case CRYPTO_AES_CBC: switch (keysize) { case 128: type = EVP_aes_128_cbc(); break; case 192: type = EVP_aes_192_cbc(); break; case 256: type = EVP_aes_256_cbc(); break; default: return (EINVAL); } break; case CRYPTO_BLF_CBC: type = EVP_bf_cbc(); break; #ifndef OPENSSL_NO_CAMELLIA case CRYPTO_CAMELLIA_CBC: switch (keysize) { case 128: type = EVP_camellia_128_cbc(); break; case 192: type = EVP_camellia_192_cbc(); break; case 256: type = 
EVP_camellia_256_cbc(); break; default: return (EINVAL); } break; #endif case CRYPTO_3DES_CBC: type = EVP_des_ede3_cbc(); break; default: return (EINVAL); } EVP_CIPHER_CTX_init(&ctx); EVP_CipherInit_ex(&ctx, type, NULL, NULL, NULL, enc); EVP_CIPHER_CTX_set_key_length(&ctx, keysize / 8); EVP_CIPHER_CTX_set_padding(&ctx, 0); bzero(iv, sizeof(iv)); EVP_CipherInit_ex(&ctx, NULL, NULL, key, iv, enc); if (EVP_CipherUpdate(&ctx, data, &outsize, data, datasize) == 0) { EVP_CIPHER_CTX_cleanup(&ctx); return (EINVAL); } assert(outsize == (int)datasize); if (EVP_CipherFinal_ex(&ctx, data + outsize, &outsize) == 0) { EVP_CIPHER_CTX_cleanup(&ctx); return (EINVAL); } assert(outsize == 0); EVP_CIPHER_CTX_cleanup(&ctx); return (0); } #endif /* !_KERNEL */ int g_eli_crypto_encrypt(u_int algo, u_char *data, size_t datasize, const u_char *key, size_t keysize) { /* We prefer AES-CBC for metadata protection. */ if (algo == CRYPTO_AES_XTS) algo = CRYPTO_AES_CBC; return (g_eli_crypto_cipher(algo, 1, data, datasize, key, keysize)); } int g_eli_crypto_decrypt(u_int algo, u_char *data, size_t datasize, const u_char *key, size_t keysize) { /* We prefer AES-CBC for metadata protection. */ if (algo == CRYPTO_AES_XTS) algo = CRYPTO_AES_CBC; return (g_eli_crypto_cipher(algo, 0, data, datasize, key, keysize)); } Index: head/sys/geom/eli/g_eli_integrity.c =================================================================== --- head/sys/geom/eli/g_eli_integrity.c (revision 336438) +++ head/sys/geom/eli/g_eli_integrity.c (revision 336439) @@ -1,540 +1,540 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005-2011 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * The data layout description when integrity verification is configured. * * One of the most important assumption here is that authenticated data and its * HMAC has to be stored in the same place (namely in the same sector) to make * it work reliable. * The problem is that file systems work only with sectors that are multiple of * 512 bytes and a power of two number. * My idea to implement it is as follows. 
* Let's store HMAC in sector. This is a must. This leaves us 480 bytes for * data. We can't use that directly (ie. we can't create provider with 480 bytes * sector size). We need another sector from where we take only 32 bytes of data * and we store HMAC of this data as well. This takes two sectors from the * original provider at the input and leaves us one sector of authenticated data * at the output. Not very efficient, but you got the idea. * Now, let's assume, we want to create provider with 4096 bytes sector. * To output 4096 bytes of authenticated data we need 8x480 plus 1x256, so we * need nine 512-bytes sectors at the input to get one 4096-bytes sector at the * output. That's better. With 4096 bytes sector we can use 89% of size of the * original provider. I find it as an acceptable cost. * The reliability comes from the fact, that every HMAC stored inside the sector * is calculated only for the data in the same sector, so its impossible to * write new data and leave old HMAC or vice versa. * * And here is the picture: * * da0: +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+ * |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |256b | * |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data | * +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+ * |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |288 bytes | * +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ |224 unused| * +----------+ * da0.eli: +----+----+----+----+----+----+----+----+----+ * |480b|480b|480b|480b|480b|480b|480b|480b|256b| * +----+----+----+----+----+----+----+----+----+ * | 4096 bytes | * +--------------------------------------------+ * * PS. You can use any sector size with geli(8). My example is using 4kB, * because it's most efficient. For 8kB sectors you need 2 extra sectors, * so the cost is the same as for 4kB sectors. */ /* * Code paths: * BIO_READ: * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> g_eli_auth_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> g_eli_auth_run -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ MALLOC_DECLARE(M_ELI); /* * Here we generate key for HMAC. Every sector has its own HMAC key, so it is * not possible to copy sectors. * We cannot depend on fact, that every sector has its own IV, because different * IV doesn't change HMAC, when we use encrypt-then-authenticate method. */ static void g_eli_auth_keygen(struct g_eli_softc *sc, off_t offset, u_char *key) { SHA256_CTX ctx; /* Copy precalculated SHA256 context. */ bcopy(&sc->sc_akeyctx, &ctx, sizeof(ctx)); SHA256_Update(&ctx, (uint8_t *)&offset, sizeof(offset)); SHA256_Final(key, &ctx); } /* * The function is called after we read and decrypt data. 
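The constants in the layout comment above can be checked with a few lines of arithmetic. A standalone userland snippet (illustrative only, not part of the driver) reproducing the nine-sector and roughly 89% figures for a 4096-byte logical sector built from 512-byte raw sectors carrying a 32-byte HMAC each:

#include <stdio.h>

int
main(void)
{
	const unsigned encr_secsize = 512;	/* raw provider sector size */
	const unsigned alen = 32;		/* HMAC bytes stored per raw sector */
	const unsigned decr_secsize = 4096;	/* sector size of the .eli provider */
	const unsigned data_secsize = encr_secsize - alen;	/* 480 data bytes */

	unsigned full = decr_secsize / data_secsize;		/* 8 full sectors */
	unsigned tail = decr_secsize % data_secsize;		/* 256 bytes left over */
	unsigned nsec = full + (tail != 0 ? 1 : 0);		/* 9 raw sectors */
	unsigned raw_bytes = nsec * encr_secsize;		/* 4608 bytes on disk */

	/* Prints: 9 raw sectors, 256 bytes in the last one, 89% efficiency */
	printf("%u raw sectors, %u bytes in the last one, %.0f%% efficiency\n",
	    nsec, tail, 100.0 * decr_secsize / raw_bytes);
	return (0);
}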
* * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> G_ELI_AUTH_READ_DONE -> g_io_deliver */ static int g_eli_auth_read_done(struct cryptop *crp) { struct g_eli_softc *sc; struct bio *bp; if (crp->crp_etype == EAGAIN) { if (g_eli_crypto_rerun(crp) == 0) return (0); } bp = (struct bio *)crp->crp_opaque; bp->bio_inbed++; if (crp->crp_etype == 0) { bp->bio_completed += crp->crp_olen; G_ELI_DEBUG(3, "Crypto READ request done (%d/%d) (add=%jd completed=%jd).", bp->bio_inbed, bp->bio_children, (intmax_t)crp->crp_olen, (intmax_t)bp->bio_completed); } else { G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.", bp->bio_inbed, bp->bio_children, crp->crp_etype); if (bp->bio_error == 0) bp->bio_error = crp->crp_etype; } sc = bp->bio_to->geom->softc; g_eli_key_drop(sc, crp->crp_desc->crd_next->crd_key); /* * Do we have all sectors already? */ if (bp->bio_inbed < bp->bio_children) return (0); if (bp->bio_error == 0) { u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize; u_char *srcdata, *dstdata, *auth; off_t coroff, corsize; /* * Verify data integrity based on calculated and read HMACs. */ /* Sectorsize of decrypted provider eg. 4096. */ decr_secsize = bp->bio_to->sectorsize; /* The real sectorsize of encrypted provider, eg. 512. */ encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize; /* Number of data bytes in one encrypted sector, eg. 480. */ data_secsize = sc->sc_data_per_sector; /* Number of sectors from decrypted provider, eg. 2. */ nsec = bp->bio_length / decr_secsize; /* Number of sectors from encrypted provider, eg. 18. */ nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; /* Last sector number in every big sector, eg. 9. */ lsec = sc->sc_bytes_per_sector / encr_secsize; srcdata = bp->bio_driver2; dstdata = bp->bio_data; auth = srcdata + encr_secsize * nsec; coroff = -1; corsize = 0; for (i = 1; i <= nsec; i++) { data_secsize = sc->sc_data_per_sector; if ((i % lsec) == 0) data_secsize = decr_secsize % data_secsize; if (bcmp(srcdata, auth, sc->sc_alen) != 0) { /* * Curruption detected, remember the offset if * this is the first corrupted sector and * increase size. */ if (bp->bio_error == 0) bp->bio_error = -1; if (coroff == -1) { coroff = bp->bio_offset + (dstdata - (u_char *)bp->bio_data); } corsize += data_secsize; } else { /* * No curruption, good. * Report previous corruption if there was one. */ if (coroff != -1) { G_ELI_DEBUG(0, "%s: Failed to authenticate %jd " "bytes of data at offset %jd.", sc->sc_name, (intmax_t)corsize, (intmax_t)coroff); coroff = -1; corsize = 0; } bcopy(srcdata + sc->sc_alen, dstdata, data_secsize); } srcdata += encr_secsize; dstdata += data_secsize; auth += sc->sc_alen; } /* Report previous corruption if there was one. */ if (coroff != -1) { G_ELI_DEBUG(0, "%s: Failed to authenticate %jd " "bytes of data at offset %jd.", sc->sc_name, (intmax_t)corsize, (intmax_t)coroff); } } free(bp->bio_driver2, M_ELI); bp->bio_driver2 = NULL; if (bp->bio_error != 0) { if (bp->bio_error == -1) bp->bio_error = EINVAL; else { G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).", bp->bio_error); } bp->bio_completed = 0; } /* * Read is finished, send it up. */ g_io_deliver(bp, bp->bio_error); atomic_subtract_int(&sc->sc_inflight, 1); return (0); } /* * The function is called after data encryption. 
* * g_eli_start -> g_eli_auth_run -> G_ELI_AUTH_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver */ static int g_eli_auth_write_done(struct cryptop *crp) { struct g_eli_softc *sc; struct g_consumer *cp; struct bio *bp, *cbp, *cbp2; u_int nsec; if (crp->crp_etype == EAGAIN) { if (g_eli_crypto_rerun(crp) == 0) return (0); } bp = (struct bio *)crp->crp_opaque; bp->bio_inbed++; if (crp->crp_etype == 0) { G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).", bp->bio_inbed, bp->bio_children); } else { G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.", bp->bio_inbed, bp->bio_children, crp->crp_etype); if (bp->bio_error == 0) bp->bio_error = crp->crp_etype; } sc = bp->bio_to->geom->softc; g_eli_key_drop(sc, crp->crp_desc->crd_key); /* * All sectors are already encrypted? */ if (bp->bio_inbed < bp->bio_children) return (0); if (bp->bio_error != 0) { G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).", bp->bio_error); free(bp->bio_driver2, M_ELI); bp->bio_driver2 = NULL; cbp = bp->bio_driver1; bp->bio_driver1 = NULL; g_destroy_bio(cbp); g_io_deliver(bp, bp->bio_error); atomic_subtract_int(&sc->sc_inflight, 1); return (0); } cp = LIST_FIRST(&sc->sc_geom->consumer); cbp = bp->bio_driver1; bp->bio_driver1 = NULL; cbp->bio_to = cp->provider; cbp->bio_done = g_eli_write_done; /* Number of sectors from decrypted provider, eg. 1. */ nsec = bp->bio_length / bp->bio_to->sectorsize; /* Number of sectors from encrypted provider, eg. 9. */ nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize; cbp->bio_length = cp->provider->sectorsize * nsec; cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; cbp->bio_data = bp->bio_driver2; /* * We write more than what is requested, so we have to be ready to write * more than MAXPHYS. */ cbp2 = NULL; if (cbp->bio_length > MAXPHYS) { cbp2 = g_duplicate_bio(bp); cbp2->bio_length = cbp->bio_length - MAXPHYS; cbp2->bio_data = cbp->bio_data + MAXPHYS; cbp2->bio_offset = cbp->bio_offset + MAXPHYS; cbp2->bio_to = cp->provider; cbp2->bio_done = g_eli_write_done; cbp->bio_length = MAXPHYS; } /* * Send encrypted data to the provider. */ G_ELI_LOGREQ(2, cbp, "Sending request."); bp->bio_inbed = 0; bp->bio_children = (cbp2 != NULL ? 2 : 1); g_io_request(cbp, cp); if (cbp2 != NULL) { G_ELI_LOGREQ(2, cbp2, "Sending request."); g_io_request(cbp2, cp); } return (0); } void g_eli_auth_read(struct g_eli_softc *sc, struct bio *bp) { struct g_consumer *cp; struct bio *cbp, *cbp2; size_t size; off_t nsec; bp->bio_pflags = 0; cp = LIST_FIRST(&sc->sc_geom->consumer); cbp = bp->bio_driver1; bp->bio_driver1 = NULL; cbp->bio_to = cp->provider; cbp->bio_done = g_eli_read_done; /* Number of sectors from decrypted provider, eg. 1. */ nsec = bp->bio_length / bp->bio_to->sectorsize; /* Number of sectors from encrypted provider, eg. 9. */ nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize; cbp->bio_length = cp->provider->sectorsize * nsec; size = cbp->bio_length; size += sc->sc_alen * nsec; size += sizeof(struct cryptop) * nsec; size += sizeof(struct cryptodesc) * nsec * 2; size += G_ELI_AUTH_SECKEYLEN * nsec; cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; bp->bio_driver2 = malloc(size, M_ELI, M_WAITOK); cbp->bio_data = bp->bio_driver2; /* * We read more than what is requested, so we have to be ready to read * more than MAXPHYS. 
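The comment above notes that geli reads (and, in the write path, writes) more data than was requested, so a single transfer can exceed MAXPHYS; the code just below handles this by issuing a second child bio for the remainder. A toy userland sketch of the same split arithmetic (MY_MAXPHYS and struct split are made-up names; the kernel code operates on struct bio instead):

#include <stddef.h>
#include <stdio.h>

#define MY_MAXPHYS	(128 * 1024)	/* stand-in for the kernel's MAXPHYS */

struct split {
	size_t len1, off1;	/* first (always issued) request */
	size_t len2, off2;	/* second request; len2 == 0 means none */
};

static struct split
split_request(size_t offset, size_t length)
{
	struct split s = { length, offset, 0, 0 };

	if (length > MY_MAXPHYS) {
		s.len1 = MY_MAXPHYS;
		s.len2 = length - MY_MAXPHYS;
		s.off2 = offset + MY_MAXPHYS;
	}
	return (s);
}

int
main(void)
{
	/* An expanded (9/8-sized) transfer that ends up larger than MAXPHYS. */
	struct split s = split_request(0, 144 * 1024);

	printf("first: %zu bytes @ %zu, second: %zu bytes @ %zu\n",
	    s.len1, s.off1, s.len2, s.off2);
	return (0);
}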
*/ cbp2 = NULL; if (cbp->bio_length > MAXPHYS) { cbp2 = g_duplicate_bio(bp); cbp2->bio_length = cbp->bio_length - MAXPHYS; cbp2->bio_data = cbp->bio_data + MAXPHYS; cbp2->bio_offset = cbp->bio_offset + MAXPHYS; cbp2->bio_to = cp->provider; cbp2->bio_done = g_eli_read_done; cbp->bio_length = MAXPHYS; } /* * Read encrypted data from provider. */ G_ELI_LOGREQ(2, cbp, "Sending request."); g_io_request(cbp, cp); if (cbp2 != NULL) { G_ELI_LOGREQ(2, cbp2, "Sending request."); g_io_request(cbp2, cp); } } /* * This is the main function responsible for cryptography (ie. communication * with crypto(9) subsystem). * * BIO_READ: * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> G_ELI_AUTH_RUN -> g_eli_auth_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> G_ELI_AUTH_RUN -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ void g_eli_auth_run(struct g_eli_worker *wr, struct bio *bp) { struct g_eli_softc *sc; struct cryptop *crp; struct cryptodesc *crde, *crda; u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize; off_t dstoff; u_char *p, *data, *auth, *authkey, *plaindata; int error; G_ELI_LOGREQ(3, bp, "%s", __func__); bp->bio_pflags = wr->w_number; sc = wr->w_softc; /* Sectorsize of decrypted provider eg. 4096. */ decr_secsize = bp->bio_to->sectorsize; /* The real sectorsize of encrypted provider, eg. 512. */ encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize; /* Number of data bytes in one encrypted sector, eg. 480. */ data_secsize = sc->sc_data_per_sector; /* Number of sectors from decrypted provider, eg. 2. */ nsec = bp->bio_length / decr_secsize; /* Number of sectors from encrypted provider, eg. 18. */ nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; /* Last sector number in every big sector, eg. 9. */ lsec = sc->sc_bytes_per_sector / encr_secsize; /* Destination offset, used for IV generation. */ dstoff = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; auth = NULL; /* Silence compiler warning. */ plaindata = bp->bio_data; if (bp->bio_cmd == BIO_READ) { data = bp->bio_driver2; auth = data + encr_secsize * nsec; p = auth + sc->sc_alen * nsec; } else { size_t size; size = encr_secsize * nsec; size += sizeof(*crp) * nsec; size += sizeof(*crde) * nsec; size += sizeof(*crda) * nsec; size += G_ELI_AUTH_SECKEYLEN * nsec; size += sizeof(uintptr_t); /* Space for alignment. */ data = malloc(size, M_ELI, M_WAITOK); bp->bio_driver2 = data; p = data + encr_secsize * nsec; } bp->bio_inbed = 0; bp->bio_children = nsec; #if defined(__mips_n64) || defined(__mips_o64) p = (char *)roundup((uintptr_t)p, sizeof(uintptr_t)); #endif for (i = 1; i <= nsec; i++, dstoff += encr_secsize) { crp = (struct cryptop *)p; p += sizeof(*crp); crde = (struct cryptodesc *)p; p += sizeof(*crde); crda = (struct cryptodesc *)p; p += sizeof(*crda); authkey = (u_char *)p; p += G_ELI_AUTH_SECKEYLEN; data_secsize = sc->sc_data_per_sector; if ((i % lsec) == 0) { data_secsize = decr_secsize % data_secsize; /* * Last encrypted sector of each decrypted sector is * only partially filled. */ if (bp->bio_cmd == BIO_WRITE) memset(data + sc->sc_alen + data_secsize, 0, encr_secsize - sc->sc_alen - data_secsize); } if (bp->bio_cmd == BIO_READ) { /* Remember read HMAC. */ bcopy(data, auth, sc->sc_alen); auth += sc->sc_alen; /* TODO: bzero(9) can be commented out later. 
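/*
 * Illustrative sketch, not part of this change: the carving pattern used by
 * the per-sector loop above, shown in isolation.  One large buffer is
 * advanced through with a cursor so every sector gets its own cryptop, two
 * cryptodescs and an authentication key without per-sector malloc calls.
 * The slot structure and helper are hypothetical; the field names follow
 * the surrounding code.
 */
struct g_eli_auth_slot {
	struct cryptop		*crp;
	struct cryptodesc	*crde;		/* encryption descriptor */
	struct cryptodesc	*crda;		/* authentication descriptor */
	u_char			*authkey;
};

static u_char *
g_eli_auth_carve(u_char *p, struct g_eli_auth_slot *slot)
{
	slot->crp = (struct cryptop *)p;
	p += sizeof(struct cryptop);
	slot->crde = (struct cryptodesc *)p;
	p += sizeof(struct cryptodesc);
	slot->crda = (struct cryptodesc *)p;
	p += sizeof(struct cryptodesc);
	slot->authkey = p;
	p += G_ELI_AUTH_SECKEYLEN;
	return (p);			/* cursor for the next sector */
}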
*/ bzero(data, sc->sc_alen); } else { bcopy(plaindata, data + sc->sc_alen, data_secsize); plaindata += data_secsize; } - crp->crp_sid = wr->w_sid; + crp->crp_session = wr->w_sid; crp->crp_ilen = sc->sc_alen + data_secsize; crp->crp_olen = data_secsize; crp->crp_opaque = (void *)bp; crp->crp_buf = (void *)data; data += encr_secsize; crp->crp_flags = CRYPTO_F_CBIFSYNC; if (g_eli_batch) crp->crp_flags |= CRYPTO_F_BATCH; if (bp->bio_cmd == BIO_WRITE) { crp->crp_callback = g_eli_auth_write_done; crp->crp_desc = crde; crde->crd_next = crda; crda->crd_next = NULL; } else { crp->crp_callback = g_eli_auth_read_done; crp->crp_desc = crda; crda->crd_next = crde; crde->crd_next = NULL; } crde->crd_skip = sc->sc_alen; crde->crd_len = data_secsize; crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT; if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) == 0) crde->crd_flags |= CRD_F_KEY_EXPLICIT; if (bp->bio_cmd == BIO_WRITE) crde->crd_flags |= CRD_F_ENCRYPT; crde->crd_alg = sc->sc_ealgo; crde->crd_key = g_eli_key_hold(sc, dstoff, encr_secsize); crde->crd_klen = sc->sc_ekeylen; if (sc->sc_ealgo == CRYPTO_AES_XTS) crde->crd_klen <<= 1; g_eli_crypto_ivgen(sc, dstoff, crde->crd_iv, sizeof(crde->crd_iv)); crda->crd_skip = sc->sc_alen; crda->crd_len = data_secsize; crda->crd_inject = 0; crda->crd_flags = CRD_F_KEY_EXPLICIT; crda->crd_alg = sc->sc_aalgo; g_eli_auth_keygen(sc, dstoff, authkey); crda->crd_key = authkey; crda->crd_klen = G_ELI_AUTH_SECKEYLEN * 8; crp->crp_etype = 0; error = crypto_dispatch(crp); KASSERT(error == 0, ("crypto_dispatch() failed (error=%d)", error)); } } Index: head/sys/geom/eli/g_eli_privacy.c =================================================================== --- head/sys/geom/eli/g_eli_privacy.c (revision 336438) +++ head/sys/geom/eli/g_eli_privacy.c (revision 336439) @@ -1,318 +1,318 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005-2011 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
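/*
 * Illustrative sketch, not part of this change: the shape of a single
 * authenticated-encryption request under the new crypto_session_t API,
 * condensed from the g_eli_auth_run() loop above.  The descriptor chaining
 * shown matches the write path (encrypt first, then authenticate); "ses",
 * "buf" and the callback are placeholders supplied by the caller.
 */
static int
example_auth_encrypt(crypto_session_t ses, caddr_t buf, int alen, int dlen,
    int (*cb)(struct cryptop *), struct cryptop *crp,
    struct cryptodesc *crde, struct cryptodesc *crda)
{
	crp->crp_session = ses;		/* opaque handle, was crp_sid */
	crp->crp_ilen = alen + dlen;
	crp->crp_olen = dlen;
	crp->crp_buf = buf;
	crp->crp_flags = CRYPTO_F_CBIFSYNC;
	crp->crp_callback = cb;
	crp->crp_desc = crde;		/* encrypt first on writes ... */
	crde->crd_next = crda;		/* ... then compute the HMAC */
	crda->crd_next = NULL;
	return (crypto_dispatch(crp));
}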
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Code paths: * BIO_READ: * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ MALLOC_DECLARE(M_ELI); /* * The function is called after we read and decrypt data. * * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> G_ELI_CRYPTO_READ_DONE -> g_io_deliver */ static int g_eli_crypto_read_done(struct cryptop *crp) { struct g_eli_softc *sc; struct bio *bp; if (crp->crp_etype == EAGAIN) { if (g_eli_crypto_rerun(crp) == 0) return (0); } bp = (struct bio *)crp->crp_opaque; bp->bio_inbed++; if (crp->crp_etype == 0) { G_ELI_DEBUG(3, "Crypto READ request done (%d/%d).", bp->bio_inbed, bp->bio_children); bp->bio_completed += crp->crp_olen; } else { G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.", bp->bio_inbed, bp->bio_children, crp->crp_etype); if (bp->bio_error == 0) bp->bio_error = crp->crp_etype; } sc = bp->bio_to->geom->softc; if (sc != NULL) g_eli_key_drop(sc, crp->crp_desc->crd_key); /* * Do we have all sectors already? */ if (bp->bio_inbed < bp->bio_children) return (0); free(bp->bio_driver2, M_ELI); bp->bio_driver2 = NULL; if (bp->bio_error != 0) { G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).", bp->bio_error); bp->bio_completed = 0; } /* * Read is finished, send it up. */ g_io_deliver(bp, bp->bio_error); if (sc != NULL) atomic_subtract_int(&sc->sc_inflight, 1); return (0); } /* * The function is called after data encryption. * * g_eli_start -> g_eli_crypto_run -> G_ELI_CRYPTO_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver */ static int g_eli_crypto_write_done(struct cryptop *crp) { struct g_eli_softc *sc; struct g_geom *gp; struct g_consumer *cp; struct bio *bp, *cbp; if (crp->crp_etype == EAGAIN) { if (g_eli_crypto_rerun(crp) == 0) return (0); } bp = (struct bio *)crp->crp_opaque; bp->bio_inbed++; if (crp->crp_etype == 0) { G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).", bp->bio_inbed, bp->bio_children); } else { G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.", bp->bio_inbed, bp->bio_children, crp->crp_etype); if (bp->bio_error == 0) bp->bio_error = crp->crp_etype; } gp = bp->bio_to->geom; sc = gp->softc; g_eli_key_drop(sc, crp->crp_desc->crd_key); /* * All sectors are already encrypted? */ if (bp->bio_inbed < bp->bio_children) return (0); bp->bio_inbed = 0; bp->bio_children = 1; cbp = bp->bio_driver1; bp->bio_driver1 = NULL; if (bp->bio_error != 0) { G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).", bp->bio_error); free(bp->bio_driver2, M_ELI); bp->bio_driver2 = NULL; g_destroy_bio(cbp); g_io_deliver(bp, bp->bio_error); atomic_subtract_int(&sc->sc_inflight, 1); return (0); } cbp->bio_data = bp->bio_driver2; cbp->bio_done = g_eli_write_done; cp = LIST_FIRST(&gp->consumer); cbp->bio_to = cp->provider; G_ELI_LOGREQ(2, cbp, "Sending request."); /* * Send encrypted data to the provider. */ g_io_request(cbp, cp); return (0); } /* * The function is called to read encrypted data. 
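/*
 * Illustrative sketch, not part of this change: the completion-callback
 * skeleton shared by the done routines above.  EAGAIN from the framework
 * means the session handle changed and the request may simply be
 * re-submitted (g_eli does this via g_eli_crypto_rerun()); otherwise the
 * callback records the first error and only finishes the parent bio once
 * every per-sector request has come back.  "example_rerun" is a placeholder
 * for that re-dispatch step.
 */
static int example_rerun(struct cryptop *);	/* hypothetical re-dispatch */

static int
example_done(struct cryptop *crp)
{
	struct bio *bp;

	if (crp->crp_etype == EAGAIN) {
		if (example_rerun(crp) == 0)
			return (0);	/* re-dispatched, wait again */
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype != 0 && bp->bio_error == 0)
		bp->bio_error = crp->crp_etype;
	if (bp->bio_inbed < bp->bio_children)
		return (0);		/* more sectors still outstanding */
	/* ... all sectors done: deliver bp ... */
	return (0);
}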
* * g_eli_start -> G_ELI_CRYPTO_READ -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver */ void g_eli_crypto_read(struct g_eli_softc *sc, struct bio *bp, boolean_t fromworker) { struct g_consumer *cp; struct bio *cbp; if (!fromworker) { /* * We are not called from the worker thread, so check if * device is suspended. */ mtx_lock(&sc->sc_queue_mtx); if (sc->sc_flags & G_ELI_FLAG_SUSPEND) { /* * If device is suspended, we place the request onto * the queue, so it can be handled after resume. */ G_ELI_DEBUG(0, "device suspended, move onto queue"); bioq_insert_tail(&sc->sc_queue, bp); mtx_unlock(&sc->sc_queue_mtx); wakeup(sc); return; } atomic_add_int(&sc->sc_inflight, 1); mtx_unlock(&sc->sc_queue_mtx); } bp->bio_pflags = 0; bp->bio_driver2 = NULL; cbp = bp->bio_driver1; cbp->bio_done = g_eli_read_done; cp = LIST_FIRST(&sc->sc_geom->consumer); cbp->bio_to = cp->provider; G_ELI_LOGREQ(2, cbp, "Sending request."); /* * Read encrypted data from provider. */ g_io_request(cbp, cp); } /* * This is the main function responsible for cryptography (ie. communication * with crypto(9) subsystem). * * BIO_READ: * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> G_ELI_CRYPTO_RUN -> g_eli_crypto_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> G_ELI_CRYPTO_RUN -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ void g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp) { struct g_eli_softc *sc; struct cryptop *crp; struct cryptodesc *crd; u_int i, nsec, secsize; off_t dstoff; size_t size; u_char *p, *data; int error; G_ELI_LOGREQ(3, bp, "%s", __func__); bp->bio_pflags = wr->w_number; sc = wr->w_softc; secsize = LIST_FIRST(&sc->sc_geom->provider)->sectorsize; nsec = bp->bio_length / secsize; /* * Calculate how much memory do we need. * We need separate crypto operation for every single sector. * It is much faster to calculate total amount of needed memory here and * do the allocation once instead of allocating memory in pieces (many, * many pieces). */ size = sizeof(*crp) * nsec; size += sizeof(*crd) * nsec; /* * If we write the data we cannot destroy current bio_data content, * so we need to allocate more memory for encrypted data. 
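/*
 * Illustrative sketch, not part of this change: the single allocation sized
 * by g_eli_crypto_run() above holds one cryptop and one cryptodesc per
 * sector, plus a shadow copy of the payload when writing so the caller's
 * bio_data is never modified.  Helper is hypothetical.
 */
static size_t
g_eli_crypto_run_bufsize(size_t biolen, u_int secsize, int iswrite)
{
	u_int nsec = biolen / secsize;
	size_t size;

	size = sizeof(struct cryptop) * nsec;
	size += sizeof(struct cryptodesc) * nsec;
	if (iswrite)
		size += biolen;		/* encrypted copy of the payload */
	return (size);
}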
*/ if (bp->bio_cmd == BIO_WRITE) size += bp->bio_length; p = malloc(size, M_ELI, M_WAITOK); bp->bio_inbed = 0; bp->bio_children = nsec; bp->bio_driver2 = p; if (bp->bio_cmd == BIO_READ) data = bp->bio_data; else { data = p; p += bp->bio_length; bcopy(bp->bio_data, data, bp->bio_length); } for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) { crp = (struct cryptop *)p; p += sizeof(*crp); crd = (struct cryptodesc *)p; p += sizeof(*crd); - crp->crp_sid = wr->w_sid; + crp->crp_session = wr->w_sid; crp->crp_ilen = secsize; crp->crp_olen = secsize; crp->crp_opaque = (void *)bp; crp->crp_buf = (void *)data; data += secsize; if (bp->bio_cmd == BIO_WRITE) crp->crp_callback = g_eli_crypto_write_done; else /* if (bp->bio_cmd == BIO_READ) */ crp->crp_callback = g_eli_crypto_read_done; crp->crp_flags = CRYPTO_F_CBIFSYNC; if (g_eli_batch) crp->crp_flags |= CRYPTO_F_BATCH; crp->crp_desc = crd; crd->crd_skip = 0; crd->crd_len = secsize; crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT; if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) == 0) crd->crd_flags |= CRD_F_KEY_EXPLICIT; if (bp->bio_cmd == BIO_WRITE) crd->crd_flags |= CRD_F_ENCRYPT; crd->crd_alg = sc->sc_ealgo; crd->crd_key = g_eli_key_hold(sc, dstoff, secsize); crd->crd_klen = sc->sc_ekeylen; if (sc->sc_ealgo == CRYPTO_AES_XTS) crd->crd_klen <<= 1; g_eli_crypto_ivgen(sc, dstoff, crd->crd_iv, sizeof(crd->crd_iv)); crd->crd_next = NULL; crp->crp_etype = 0; error = crypto_dispatch(crp); KASSERT(error == 0, ("crypto_dispatch() failed (error=%d)", error)); } } Index: head/sys/kgssapi/krb5/kcrypto_aes.c =================================================================== --- head/sys/kgssapi/krb5/kcrypto_aes.c (revision 336438) +++ head/sys/kgssapi/krb5/kcrypto_aes.c (revision 336439) @@ -1,392 +1,392 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
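/*
 * Illustrative sketch, not part of this change: AES-XTS consumes two keys
 * (a data key and a tweak key), so the loop above doubles crd_klen for
 * CRYPTO_AES_XTS while other ciphers keep the configured key length.
 * "ealgo" and "ekeylen" stand in for the softc fields.
 */
static int
example_xts_klen(int ealgo, int ekeylen)
{
	int klen = ekeylen;

	if (ealgo == CRYPTO_AES_XTS)
		klen <<= 1;		/* data key + tweak key */
	return (klen);
}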
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "kcrypto.h" struct aes_state { struct mtx as_lock; crypto_session_t as_session_aes; crypto_session_t as_session_sha1; }; static void aes_init(struct krb5_key_state *ks) { struct aes_state *as; as = malloc(sizeof(struct aes_state), M_GSSAPI, M_WAITOK|M_ZERO); mtx_init(&as->as_lock, "gss aes lock", NULL, MTX_DEF); ks->ks_priv = as; } static void aes_destroy(struct krb5_key_state *ks) { struct aes_state *as = ks->ks_priv; if (as->as_session_aes != 0) crypto_freesession(as->as_session_aes); if (as->as_session_sha1 != 0) crypto_freesession(as->as_session_sha1); mtx_destroy(&as->as_lock); free(ks->ks_priv, M_GSSAPI); } static void aes_set_key(struct krb5_key_state *ks, const void *in) { void *kp = ks->ks_key; struct aes_state *as = ks->ks_priv; struct cryptoini cri; if (kp != in) bcopy(in, kp, ks->ks_class->ec_keylen); if (as->as_session_aes != 0) crypto_freesession(as->as_session_aes); if (as->as_session_sha1 != 0) crypto_freesession(as->as_session_sha1); /* * We only want the first 96 bits of the HMAC. */ bzero(&cri, sizeof(cri)); cri.cri_alg = CRYPTO_SHA1_HMAC; cri.cri_klen = ks->ks_class->ec_keybits; cri.cri_mlen = 12; cri.cri_key = ks->ks_key; cri.cri_next = NULL; crypto_newsession(&as->as_session_sha1, &cri, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE); bzero(&cri, sizeof(cri)); cri.cri_alg = CRYPTO_AES_CBC; cri.cri_klen = ks->ks_class->ec_keybits; cri.cri_mlen = 0; cri.cri_key = ks->ks_key; cri.cri_next = NULL; crypto_newsession(&as->as_session_aes, &cri, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE); } static void aes_random_to_key(struct krb5_key_state *ks, const void *in) { aes_set_key(ks, in); } static int aes_crypto_cb(struct cryptop *crp) { int error; struct aes_state *as = (struct aes_state *) crp->crp_opaque; - if (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC) + if (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC) return (0); error = crp->crp_etype; if (error == EAGAIN) error = crypto_dispatch(crp); mtx_lock(&as->as_lock); if (error || (crp->crp_flags & CRYPTO_F_DONE)) wakeup(crp); mtx_unlock(&as->as_lock); return (0); } static void aes_encrypt_1(const struct krb5_key_state *ks, int buftype, void *buf, size_t skip, size_t len, void *ivec, int encdec) { struct aes_state *as = ks->ks_priv; struct cryptop *crp; struct cryptodesc *crd; int error; crp = crypto_getreq(1); crd = crp->crp_desc; crd->crd_skip = skip; crd->crd_len = len; crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT | encdec; if (ivec) { bcopy(ivec, crd->crd_iv, 16); } else { bzero(crd->crd_iv, 16); } crd->crd_next = NULL; crd->crd_alg = CRYPTO_AES_CBC; - crp->crp_sid = as->as_session_aes; + crp->crp_session = as->as_session_aes; crp->crp_flags = buftype | CRYPTO_F_CBIFSYNC; crp->crp_buf = buf; crp->crp_opaque = (void *) as; crp->crp_callback = aes_crypto_cb; error = crypto_dispatch(crp); - if ((CRYPTO_SESID2CAPS(as->as_session_aes) & CRYPTOCAP_F_SYNC) == 0) { + if ((crypto_ses2caps(as->as_session_aes) & CRYPTOCAP_F_SYNC) == 0) { mtx_lock(&as->as_lock); if (!error && !(crp->crp_flags & CRYPTO_F_DONE)) error = msleep(crp, &as->as_lock, 0, "gssaes", 0); mtx_unlock(&as->as_lock); } crypto_freereq(crp); } static void aes_encrypt(const struct krb5_key_state *ks, struct mbuf *inout, size_t skip, size_t len, void *ivec, size_t ivlen) { size_t blocklen = 16, plen; struct { uint8_t cn_1[16], cn[16]; } last2; int i, off; /* * AES encryption with cyphertext stealing: * * CTSencrypt(P[0], ..., 
P[n], IV, K): * len = length(P[n]) * (C[0], ..., C[n-2], E[n-1]) = * CBCencrypt(P[0], ..., P[n-1], IV, K) * P = pad(P[n], 0, blocksize) * E[n] = CBCencrypt(P, E[n-1], K); * C[n-1] = E[n] * C[n] = E[n-1]{0..len-1} */ plen = len % blocklen; if (len == blocklen) { /* * Note: caller will ensure len >= blocklen. */ aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec, CRD_F_ENCRYPT); } else if (plen == 0) { /* * This is equivalent to CBC mode followed by swapping * the last two blocks. We assume that neither of the * last two blocks cross iov boundaries. */ aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec, CRD_F_ENCRYPT); off = skip + len - 2 * blocklen; m_copydata(inout, off, 2 * blocklen, (void*) &last2); m_copyback(inout, off, blocklen, last2.cn); m_copyback(inout, off + blocklen, blocklen, last2.cn_1); } else { /* * This is the difficult case. We encrypt all but the * last partial block first. We then create a padded * copy of the last block and encrypt that using the * second to last encrypted block as IV. Once we have * the encrypted versions of the last two blocks, we * reshuffle to create the final result. */ aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len - plen, ivec, CRD_F_ENCRYPT); /* * Copy out the last two blocks, pad the last block * and encrypt it. Rearrange to get the final * result. The cyphertext for cn_1 is in cn. The * cyphertext for cn is the first plen bytes of what * is in cn_1 now. */ off = skip + len - blocklen - plen; m_copydata(inout, off, blocklen + plen, (void*) &last2); for (i = plen; i < blocklen; i++) last2.cn[i] = 0; aes_encrypt_1(ks, 0, last2.cn, 0, blocklen, last2.cn_1, CRD_F_ENCRYPT); m_copyback(inout, off, blocklen, last2.cn); m_copyback(inout, off + blocklen, plen, last2.cn_1); } } static void aes_decrypt(const struct krb5_key_state *ks, struct mbuf *inout, size_t skip, size_t len, void *ivec, size_t ivlen) { size_t blocklen = 16, plen; struct { uint8_t cn_1[16], cn[16]; } last2; int i, off, t; /* * AES decryption with cyphertext stealing: * * CTSencrypt(C[0], ..., C[n], IV, K): * len = length(C[n]) * E[n] = C[n-1] * X = decrypt(E[n], K) * P[n] = (X ^ C[n]){0..len-1} * E[n-1] = {C[n,0],...,C[n,len-1],X[len],...,X[blocksize-1]} * (P[0],...,P[n-1]) = CBCdecrypt(C[0],...,C[n-2],E[n-1], IV, K) */ plen = len % blocklen; if (len == blocklen) { /* * Note: caller will ensure len >= blocklen. */ aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec, 0); } else if (plen == 0) { /* * This is equivalent to CBC mode followed by swapping * the last two blocks. */ off = skip + len - 2 * blocklen; m_copydata(inout, off, 2 * blocklen, (void*) &last2); m_copyback(inout, off, blocklen, last2.cn); m_copyback(inout, off + blocklen, blocklen, last2.cn_1); aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec, 0); } else { /* * This is the difficult case. We first decrypt the * second to last block with a zero IV to make X. The * plaintext for the last block is the XOR of X and * the last cyphertext block. * * We derive a new cypher text for the second to last * block by mixing the unused bytes of X with the last * cyphertext block. The result of that can be * decrypted with the rest in CBC mode. 
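/*
 * Illustrative sketch, not part of this change: the length arithmetic
 * behind the three ciphertext-stealing cases above.  plen is the size of
 * the final partial block; when it is zero the message is a whole number of
 * blocks and CTS reduces to CBC plus swapping the last two blocks, otherwise
 * the last full block and the partial block starting at the returned offset
 * get the special treatment described in the comments.  Purely illustrative.
 */
static void
example_cts_split(size_t skip, size_t len, size_t blocklen,
    size_t *plenp, size_t *offp)
{
	size_t plen = len % blocklen;

	*plenp = plen;
	if (plen == 0)
		*offp = skip + len - 2 * blocklen;	/* swap C[n-1], C[n] */
	else
		*offp = skip + len - blocklen - plen;	/* last full + partial */
}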
*/ off = skip + len - plen - blocklen; aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, off, blocklen, NULL, 0); m_copydata(inout, off, blocklen + plen, (void*) &last2); for (i = 0; i < plen; i++) { t = last2.cn[i]; last2.cn[i] ^= last2.cn_1[i]; last2.cn_1[i] = t; } m_copyback(inout, off, blocklen + plen, (void*) &last2); aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len - plen, ivec, 0); } } static void aes_checksum(const struct krb5_key_state *ks, int usage, struct mbuf *inout, size_t skip, size_t inlen, size_t outlen) { struct aes_state *as = ks->ks_priv; struct cryptop *crp; struct cryptodesc *crd; int error; crp = crypto_getreq(1); crd = crp->crp_desc; crd->crd_skip = skip; crd->crd_len = inlen; crd->crd_inject = skip + inlen; crd->crd_flags = 0; crd->crd_next = NULL; crd->crd_alg = CRYPTO_SHA1_HMAC; - crp->crp_sid = as->as_session_sha1; + crp->crp_session = as->as_session_sha1; crp->crp_ilen = inlen; crp->crp_olen = 12; crp->crp_etype = 0; crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; crp->crp_buf = (void *) inout; crp->crp_opaque = (void *) as; crp->crp_callback = aes_crypto_cb; error = crypto_dispatch(crp); - if ((CRYPTO_SESID2CAPS(as->as_session_sha1) & CRYPTOCAP_F_SYNC) == 0) { + if ((crypto_ses2caps(as->as_session_sha1) & CRYPTOCAP_F_SYNC) == 0) { mtx_lock(&as->as_lock); if (!error && !(crp->crp_flags & CRYPTO_F_DONE)) error = msleep(crp, &as->as_lock, 0, "gssaes", 0); mtx_unlock(&as->as_lock); } crypto_freereq(crp); } struct krb5_encryption_class krb5_aes128_encryption_class = { "aes128-cts-hmac-sha1-96", /* name */ ETYPE_AES128_CTS_HMAC_SHA1_96, /* etype */ EC_DERIVED_KEYS, /* flags */ 16, /* blocklen */ 1, /* msgblocklen */ 12, /* checksumlen */ 128, /* keybits */ 16, /* keylen */ aes_init, aes_destroy, aes_set_key, aes_random_to_key, aes_encrypt, aes_decrypt, aes_checksum }; struct krb5_encryption_class krb5_aes256_encryption_class = { "aes256-cts-hmac-sha1-96", /* name */ ETYPE_AES256_CTS_HMAC_SHA1_96, /* etype */ EC_DERIVED_KEYS, /* flags */ 16, /* blocklen */ 1, /* msgblocklen */ 12, /* checksumlen */ 256, /* keybits */ 32, /* keylen */ aes_init, aes_destroy, aes_set_key, aes_random_to_key, aes_encrypt, aes_decrypt, aes_checksum }; Index: head/sys/kgssapi/krb5/kcrypto_des.c =================================================================== --- head/sys/kgssapi/krb5/kcrypto_des.c (revision 336438) +++ head/sys/kgssapi/krb5/kcrypto_des.c (revision 336439) @@ -1,264 +1,264 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
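/*
 * Illustrative sketch, not part of this change: the dispatch-and-wait
 * pattern used by aes_encrypt_1() and aes_checksum() above.  CRYPTO_F_CBIFSYNC
 * asks for the callback to run inline when the driver is synchronous, in
 * which case there is nothing to sleep on; otherwise the caller msleep()s
 * until the callback (which checks crypto_ses2caps() for CRYPTOCAP_F_SYNC)
 * wakes it up.  The lock and wait message are placeholders.
 */
static int
example_dispatch_wait(struct cryptop *crp, crypto_session_t ses,
    struct mtx *lock)
{
	int error;

	crp->crp_flags |= CRYPTO_F_CBIFSYNC;
	error = crypto_dispatch(crp);
	if ((crypto_ses2caps(ses) & CRYPTOCAP_F_SYNC) == 0) {
		mtx_lock(lock);
		if (!error && !(crp->crp_flags & CRYPTO_F_DONE))
			error = msleep(crp, lock, 0, "crwait", 0);
		mtx_unlock(lock);
	}
	return (error);
}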
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include "kcrypto.h" struct des1_state { struct mtx ds_lock; crypto_session_t ds_session; }; static void des1_init(struct krb5_key_state *ks) { struct des1_state *ds; ds = malloc(sizeof(struct des1_state), M_GSSAPI, M_WAITOK|M_ZERO); mtx_init(&ds->ds_lock, "gss des lock", NULL, MTX_DEF); ks->ks_priv = ds; } static void des1_destroy(struct krb5_key_state *ks) { struct des1_state *ds = ks->ks_priv; if (ds->ds_session) crypto_freesession(ds->ds_session); mtx_destroy(&ds->ds_lock); free(ks->ks_priv, M_GSSAPI); } static void des1_set_key(struct krb5_key_state *ks, const void *in) { void *kp = ks->ks_key; struct des1_state *ds = ks->ks_priv; struct cryptoini cri[1]; if (kp != in) bcopy(in, kp, ks->ks_class->ec_keylen); if (ds->ds_session) crypto_freesession(ds->ds_session); bzero(cri, sizeof(cri)); cri[0].cri_alg = CRYPTO_DES_CBC; cri[0].cri_klen = 64; cri[0].cri_mlen = 0; cri[0].cri_key = ks->ks_key; cri[0].cri_next = NULL; crypto_newsession(&ds->ds_session, cri, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE); } static void des1_random_to_key(struct krb5_key_state *ks, const void *in) { uint8_t *outkey = ks->ks_key; const uint8_t *inkey = in; /* * Expand 56 bits of random data to 64 bits as follows * (in the example, bit number 1 is the MSB of the 56 * bits of random data): * * expanded = * 1 2 3 4 5 6 7 p * 9 10 11 12 13 14 15 p * 17 18 19 20 21 22 23 p * 25 26 27 28 29 30 31 p * 33 34 35 36 37 38 39 p * 41 42 43 44 45 46 47 p * 49 50 51 52 53 54 55 p * 56 48 40 32 24 16 8 p */ outkey[0] = inkey[0]; outkey[1] = inkey[1]; outkey[2] = inkey[2]; outkey[3] = inkey[3]; outkey[4] = inkey[4]; outkey[5] = inkey[5]; outkey[6] = inkey[6]; outkey[7] = (((inkey[0] & 1) << 1) | ((inkey[1] & 1) << 2) | ((inkey[2] & 1) << 3) | ((inkey[3] & 1) << 4) | ((inkey[4] & 1) << 5) | ((inkey[5] & 1) << 6) | ((inkey[6] & 1) << 7)); des_set_odd_parity((des_cblock *) outkey); if (des_is_weak_key((des_cblock *) outkey)) outkey[7] ^= 0xf0; des1_set_key(ks, ks->ks_key); } static int des1_crypto_cb(struct cryptop *crp) { int error; struct des1_state *ds = (struct des1_state *) crp->crp_opaque; - if (CRYPTO_SESID2CAPS(ds->ds_session) & CRYPTOCAP_F_SYNC) + if (crypto_ses2caps(ds->ds_session) & CRYPTOCAP_F_SYNC) return (0); error = crp->crp_etype; if (error == EAGAIN) error = crypto_dispatch(crp); mtx_lock(&ds->ds_lock); if (error || (crp->crp_flags & CRYPTO_F_DONE)) wakeup(crp); mtx_unlock(&ds->ds_lock); return (0); } static void des1_encrypt_1(const struct krb5_key_state *ks, int buftype, void *buf, size_t skip, size_t len, void *ivec, int encdec) { struct des1_state *ds = ks->ks_priv; struct cryptop *crp; struct cryptodesc *crd; int error; crp = crypto_getreq(1); crd = crp->crp_desc; crd->crd_skip = skip; crd->crd_len = len; crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT | encdec; if (ivec) { bcopy(ivec, crd->crd_iv, 8); } else { bzero(crd->crd_iv, 8); } crd->crd_next = NULL; 
crd->crd_alg = CRYPTO_DES_CBC; - crp->crp_sid = ds->ds_session; + crp->crp_session = ds->ds_session; crp->crp_flags = buftype | CRYPTO_F_CBIFSYNC; crp->crp_buf = buf; crp->crp_opaque = (void *) ds; crp->crp_callback = des1_crypto_cb; error = crypto_dispatch(crp); - if ((CRYPTO_SESID2CAPS(ds->ds_session) & CRYPTOCAP_F_SYNC) == 0) { + if ((crypto_ses2caps(ds->ds_session) & CRYPTOCAP_F_SYNC) == 0) { mtx_lock(&ds->ds_lock); if (!error && !(crp->crp_flags & CRYPTO_F_DONE)) error = msleep(crp, &ds->ds_lock, 0, "gssdes", 0); mtx_unlock(&ds->ds_lock); } crypto_freereq(crp); } static void des1_encrypt(const struct krb5_key_state *ks, struct mbuf *inout, size_t skip, size_t len, void *ivec, size_t ivlen) { des1_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec, CRD_F_ENCRYPT); } static void des1_decrypt(const struct krb5_key_state *ks, struct mbuf *inout, size_t skip, size_t len, void *ivec, size_t ivlen) { des1_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec, 0); } static int MD5Update_int(void *ctx, void *buf, u_int len) { MD5Update(ctx, buf, len); return (0); } static void des1_checksum(const struct krb5_key_state *ks, int usage, struct mbuf *inout, size_t skip, size_t inlen, size_t outlen) { char hash[16]; MD5_CTX md5; /* * This checksum is specifically for GSS-API. First take the * MD5 checksum of the message, then calculate the CBC mode * checksum of that MD5 checksum using a zero IV. */ MD5Init(&md5); m_apply(inout, skip, inlen, MD5Update_int, &md5); MD5Final(hash, &md5); des1_encrypt_1(ks, 0, hash, 0, 16, NULL, CRD_F_ENCRYPT); m_copyback(inout, skip + inlen, outlen, hash + 8); } struct krb5_encryption_class krb5_des_encryption_class = { "des-cbc-md5", /* name */ ETYPE_DES_CBC_CRC, /* etype */ 0, /* flags */ 8, /* blocklen */ 8, /* msgblocklen */ 8, /* checksumlen */ 56, /* keybits */ 8, /* keylen */ des1_init, des1_destroy, des1_set_key, des1_random_to_key, des1_encrypt, des1_decrypt, des1_checksum }; Index: head/sys/kgssapi/krb5/kcrypto_des3.c =================================================================== --- head/sys/kgssapi/krb5/kcrypto_des3.c (revision 336438) +++ head/sys/kgssapi/krb5/kcrypto_des3.c (revision 336439) @@ -1,404 +1,404 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include "kcrypto.h" #define DES3_FLAGS (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) struct des3_state { struct mtx ds_lock; crypto_session_t ds_session; }; static void des3_init(struct krb5_key_state *ks) { struct des3_state *ds; ds = malloc(sizeof(struct des3_state), M_GSSAPI, M_WAITOK|M_ZERO); mtx_init(&ds->ds_lock, "gss des3 lock", NULL, MTX_DEF); ks->ks_priv = ds; } static void des3_destroy(struct krb5_key_state *ks) { struct des3_state *ds = ks->ks_priv; if (ds->ds_session) crypto_freesession(ds->ds_session); mtx_destroy(&ds->ds_lock); free(ks->ks_priv, M_GSSAPI); } static void des3_set_key(struct krb5_key_state *ks, const void *in) { void *kp = ks->ks_key; struct des3_state *ds = ks->ks_priv; struct cryptoini cri[2]; if (kp != in) bcopy(in, kp, ks->ks_class->ec_keylen); if (ds->ds_session) crypto_freesession(ds->ds_session); bzero(cri, sizeof(cri)); cri[0].cri_alg = CRYPTO_SHA1_HMAC; cri[0].cri_klen = 192; cri[0].cri_mlen = 0; cri[0].cri_key = ks->ks_key; cri[0].cri_next = &cri[1]; cri[1].cri_alg = CRYPTO_3DES_CBC; cri[1].cri_klen = 192; cri[1].cri_mlen = 0; cri[1].cri_key = ks->ks_key; cri[1].cri_next = NULL; crypto_newsession(&ds->ds_session, cri, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE); } static void des3_random_to_key(struct krb5_key_state *ks, const void *in) { uint8_t *outkey; const uint8_t *inkey; int subkey; for (subkey = 0, outkey = ks->ks_key, inkey = in; subkey < 3; subkey++, outkey += 8, inkey += 7) { /* * Expand 56 bits of random data to 64 bits as follows * (in the example, bit number 1 is the MSB of the 56 * bits of random data): * * expanded = * 1 2 3 4 5 6 7 p * 9 10 11 12 13 14 15 p * 17 18 19 20 21 22 23 p * 25 26 27 28 29 30 31 p * 33 34 35 36 37 38 39 p * 41 42 43 44 45 46 47 p * 49 50 51 52 53 54 55 p * 56 48 40 32 24 16 8 p */ outkey[0] = inkey[0]; outkey[1] = inkey[1]; outkey[2] = inkey[2]; outkey[3] = inkey[3]; outkey[4] = inkey[4]; outkey[5] = inkey[5]; outkey[6] = inkey[6]; outkey[7] = (((inkey[0] & 1) << 1) | ((inkey[1] & 1) << 2) | ((inkey[2] & 1) << 3) | ((inkey[3] & 1) << 4) | ((inkey[4] & 1) << 5) | ((inkey[5] & 1) << 6) | ((inkey[6] & 1) << 7)); des_set_odd_parity((des_cblock *) outkey); if (des_is_weak_key((des_cblock *) outkey)) outkey[7] ^= 0xf0; } des3_set_key(ks, ks->ks_key); } static int des3_crypto_cb(struct cryptop *crp) { int error; struct des3_state *ds = (struct des3_state *) crp->crp_opaque; - if (CRYPTO_SESID2CAPS(ds->ds_session) & CRYPTOCAP_F_SYNC) + if (crypto_ses2caps(ds->ds_session) & CRYPTOCAP_F_SYNC) return (0); error = crp->crp_etype; if (error == EAGAIN) error = crypto_dispatch(crp); mtx_lock(&ds->ds_lock); if (error || (crp->crp_flags & CRYPTO_F_DONE)) wakeup(crp); mtx_unlock(&ds->ds_lock); return (0); } static void des3_encrypt_1(const struct krb5_key_state *ks, struct mbuf *inout, size_t skip, size_t len, void *ivec, int encdec) { struct des3_state *ds = ks->ks_priv; struct cryptop *crp; 
struct cryptodesc *crd; int error; crp = crypto_getreq(1); crd = crp->crp_desc; crd->crd_skip = skip; crd->crd_len = len; crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT | encdec; if (ivec) { bcopy(ivec, crd->crd_iv, 8); } else { bzero(crd->crd_iv, 8); } crd->crd_next = NULL; crd->crd_alg = CRYPTO_3DES_CBC; - crp->crp_sid = ds->ds_session; + crp->crp_session = ds->ds_session; crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; crp->crp_buf = (void *) inout; crp->crp_opaque = (void *) ds; crp->crp_callback = des3_crypto_cb; error = crypto_dispatch(crp); - if ((CRYPTO_SESID2CAPS(ds->ds_session) & CRYPTOCAP_F_SYNC) == 0) { + if ((crypto_ses2caps(ds->ds_session) & CRYPTOCAP_F_SYNC) == 0) { mtx_lock(&ds->ds_lock); if (!error && !(crp->crp_flags & CRYPTO_F_DONE)) error = msleep(crp, &ds->ds_lock, 0, "gssdes3", 0); mtx_unlock(&ds->ds_lock); } crypto_freereq(crp); } static void des3_encrypt(const struct krb5_key_state *ks, struct mbuf *inout, size_t skip, size_t len, void *ivec, size_t ivlen) { des3_encrypt_1(ks, inout, skip, len, ivec, CRD_F_ENCRYPT); } static void des3_decrypt(const struct krb5_key_state *ks, struct mbuf *inout, size_t skip, size_t len, void *ivec, size_t ivlen) { des3_encrypt_1(ks, inout, skip, len, ivec, 0); } static void des3_checksum(const struct krb5_key_state *ks, int usage, struct mbuf *inout, size_t skip, size_t inlen, size_t outlen) { struct des3_state *ds = ks->ks_priv; struct cryptop *crp; struct cryptodesc *crd; int error; crp = crypto_getreq(1); crd = crp->crp_desc; crd->crd_skip = skip; crd->crd_len = inlen; crd->crd_inject = skip + inlen; crd->crd_flags = 0; crd->crd_next = NULL; crd->crd_alg = CRYPTO_SHA1_HMAC; - crp->crp_sid = ds->ds_session; + crp->crp_session = ds->ds_session; crp->crp_ilen = inlen; crp->crp_olen = 20; crp->crp_etype = 0; crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; crp->crp_buf = (void *) inout; crp->crp_opaque = (void *) ds; crp->crp_callback = des3_crypto_cb; error = crypto_dispatch(crp); - if ((CRYPTO_SESID2CAPS(ds->ds_session) & CRYPTOCAP_F_SYNC) == 0) { + if ((crypto_ses2caps(ds->ds_session) & CRYPTOCAP_F_SYNC) == 0) { mtx_lock(&ds->ds_lock); if (!error && !(crp->crp_flags & CRYPTO_F_DONE)) error = msleep(crp, &ds->ds_lock, 0, "gssdes3", 0); mtx_unlock(&ds->ds_lock); } crypto_freereq(crp); } struct krb5_encryption_class krb5_des3_encryption_class = { "des3-cbc-sha1", /* name */ ETYPE_DES3_CBC_SHA1, /* etype */ EC_DERIVED_KEYS, /* flags */ 8, /* blocklen */ 8, /* msgblocklen */ 20, /* checksumlen */ 168, /* keybits */ 24, /* keylen */ des3_init, des3_destroy, des3_set_key, des3_random_to_key, des3_encrypt, des3_decrypt, des3_checksum }; #if 0 struct des3_dk_test { uint8_t key[24]; uint8_t usage[8]; size_t usagelen; uint8_t dk[24]; }; struct des3_dk_test tests[] = { {{0xdc, 0xe0, 0x6b, 0x1f, 0x64, 0xc8, 0x57, 0xa1, 0x1c, 0x3d, 0xb5, 0x7c, 0x51, 0x89, 0x9b, 0x2c, 0xc1, 0x79, 0x10, 0x08, 0xce, 0x97, 0x3b, 0x92}, {0x00, 0x00, 0x00, 0x01, 0x55}, 5, {0x92, 0x51, 0x79, 0xd0, 0x45, 0x91, 0xa7, 0x9b, 0x5d, 0x31, 0x92, 0xc4, 0xa7, 0xe9, 0xc2, 0x89, 0xb0, 0x49, 0xc7, 0x1f, 0x6e, 0xe6, 0x04, 0xcd}}, {{0x5e, 0x13, 0xd3, 0x1c, 0x70, 0xef, 0x76, 0x57, 0x46, 0x57, 0x85, 0x31, 0xcb, 0x51, 0xc1, 0x5b, 0xf1, 0x1c, 0xa8, 0x2c, 0x97, 0xce, 0xe9, 0xf2}, {0x00, 0x00, 0x00, 0x01, 0xaa}, 5, {0x9e, 0x58, 0xe5, 0xa1, 0x46, 0xd9, 0x94, 0x2a, 0x10, 0x1c, 0x46, 0x98, 0x45, 0xd6, 0x7a, 0x20, 0xe3, 0xc4, 0x25, 0x9e, 0xd9, 0x13, 0xf2, 0x07}}, {{0x98, 0xe6, 0xfd, 0x8a, 0x04, 0xa4, 0xb6, 0x85, 0x9b, 0x75, 0xa1, 0x76, 0x54, 0x0b, 0x97, 0x52, 0xba, 0xd3, 0xec, 
0xd6, 0x10, 0xa2, 0x52, 0xbc}, {0x00, 0x00, 0x00, 0x01, 0x55}, 5, {0x13, 0xfe, 0xf8, 0x0d, 0x76, 0x3e, 0x94, 0xec, 0x6d, 0x13, 0xfd, 0x2c, 0xa1, 0xd0, 0x85, 0x07, 0x02, 0x49, 0xda, 0xd3, 0x98, 0x08, 0xea, 0xbf}}, {{0x62, 0x2a, 0xec, 0x25, 0xa2, 0xfe, 0x2c, 0xad, 0x70, 0x94, 0x68, 0x0b, 0x7c, 0x64, 0x94, 0x02, 0x80, 0x08, 0x4c, 0x1a, 0x7c, 0xec, 0x92, 0xb5}, {0x00, 0x00, 0x00, 0x01, 0xaa}, 5, {0xf8, 0xdf, 0xbf, 0x04, 0xb0, 0x97, 0xe6, 0xd9, 0xdc, 0x07, 0x02, 0x68, 0x6b, 0xcb, 0x34, 0x89, 0xd9, 0x1f, 0xd9, 0xa4, 0x51, 0x6b, 0x70, 0x3e}}, {{0xd3, 0xf8, 0x29, 0x8c, 0xcb, 0x16, 0x64, 0x38, 0xdc, 0xb9, 0xb9, 0x3e, 0xe5, 0xa7, 0x62, 0x92, 0x86, 0xa4, 0x91, 0xf8, 0x38, 0xf8, 0x02, 0xfb}, {0x6b, 0x65, 0x72, 0x62, 0x65, 0x72, 0x6f, 0x73}, 8, {0x23, 0x70, 0xda, 0x57, 0x5d, 0x2a, 0x3d, 0xa8, 0x64, 0xce, 0xbf, 0xdc, 0x52, 0x04, 0xd5, 0x6d, 0xf7, 0x79, 0xa7, 0xdf, 0x43, 0xd9, 0xda, 0x43}}, {{0xc1, 0x08, 0x16, 0x49, 0xad, 0xa7, 0x43, 0x62, 0xe6, 0xa1, 0x45, 0x9d, 0x01, 0xdf, 0xd3, 0x0d, 0x67, 0xc2, 0x23, 0x4c, 0x94, 0x07, 0x04, 0xda}, {0x00, 0x00, 0x00, 0x01, 0x55}, 5, {0x34, 0x80, 0x57, 0xec, 0x98, 0xfd, 0xc4, 0x80, 0x16, 0x16, 0x1c, 0x2a, 0x4c, 0x7a, 0x94, 0x3e, 0x92, 0xae, 0x49, 0x2c, 0x98, 0x91, 0x75, 0xf7}}, {{0x5d, 0x15, 0x4a, 0xf2, 0x38, 0xf4, 0x67, 0x13, 0x15, 0x57, 0x19, 0xd5, 0x5e, 0x2f, 0x1f, 0x79, 0x0d, 0xd6, 0x61, 0xf2, 0x79, 0xa7, 0x91, 0x7c}, {0x00, 0x00, 0x00, 0x01, 0xaa}, 5, {0xa8, 0x80, 0x8a, 0xc2, 0x67, 0xda, 0xda, 0x3d, 0xcb, 0xe9, 0xa7, 0xc8, 0x46, 0x26, 0xfb, 0xc7, 0x61, 0xc2, 0x94, 0xb0, 0x13, 0x15, 0xe5, 0xc1}}, {{0x79, 0x85, 0x62, 0xe0, 0x49, 0x85, 0x2f, 0x57, 0xdc, 0x8c, 0x34, 0x3b, 0xa1, 0x7f, 0x2c, 0xa1, 0xd9, 0x73, 0x94, 0xef, 0xc8, 0xad, 0xc4, 0x43}, {0x00, 0x00, 0x00, 0x01, 0x55}, 5, {0xc8, 0x13, 0xf8, 0x8a, 0x3b, 0xe3, 0xb3, 0x34, 0xf7, 0x54, 0x25, 0xce, 0x91, 0x75, 0xfb, 0xe3, 0xc8, 0x49, 0x3b, 0x89, 0xc8, 0x70, 0x3b, 0x49}}, {{0x26, 0xdc, 0xe3, 0x34, 0xb5, 0x45, 0x29, 0x2f, 0x2f, 0xea, 0xb9, 0xa8, 0x70, 0x1a, 0x89, 0xa4, 0xb9, 0x9e, 0xb9, 0x94, 0x2c, 0xec, 0xd0, 0x16}, {0x00, 0x00, 0x00, 0x01, 0xaa}, 5, {0xf4, 0x8f, 0xfd, 0x6e, 0x83, 0xf8, 0x3e, 0x73, 0x54, 0xe6, 0x94, 0xfd, 0x25, 0x2c, 0xf8, 0x3b, 0xfe, 0x58, 0xf7, 0xd5, 0xba, 0x37, 0xec, 0x5d}}, }; #define N_TESTS (sizeof(tests) / sizeof(tests[0])) int main(int argc, char **argv) { struct krb5_key_state *key, *dk; uint8_t *dkp; int j, i; for (j = 0; j < N_TESTS; j++) { struct des3_dk_test *t = &tests[j]; key = krb5_create_key(&des3_encryption_class); krb5_set_key(key, t->key); dk = krb5_derive_key(key, t->usage, t->usagelen); krb5_free_key(key); if (memcmp(dk->ks_key, t->dk, 24)) { printf("DES3 dk("); for (i = 0; i < 24; i++) printf("%02x", t->key[i]); printf(", "); for (i = 0; i < t->usagelen; i++) printf("%02x", t->usage[i]); printf(") failed\n"); printf("should be: "); for (i = 0; i < 24; i++) printf("%02x", t->dk[i]); printf("\n result was: "); dkp = dk->ks_key; for (i = 0; i < 24; i++) printf("%02x", dkp[i]); printf("\n"); } krb5_free_key(dk); } return (0); } #endif Index: head/sys/mips/cavium/cryptocteon/cryptocteon.c =================================================================== --- head/sys/mips/cavium/cryptocteon/cryptocteon.c (revision 336438) +++ head/sys/mips/cavium/cryptocteon/cryptocteon.c (revision 336439) @@ -1,526 +1,444 @@ /* * Octeon Crypto for OCF * * Written by David McCullough * Copyright (C) 2009 David McCullough * * LICENSE TERMS * * The free distribution and use of this software in both source and binary * form is allowed (with or without changes) provided that: * * 1. 
distributions of this source code include the above copyright * notice, this list of conditions and the following disclaimer; * * 2. distributions in binary form include the above copyright * notice, this list of conditions and the following disclaimer * in the documentation and/or other associated materials; * * 3. the copyright holder's name is not used to endorse products * built using this software without specific written permission. * * DISCLAIMER * * This software is provided 'as is' with no explicit or implied warranties * in respect of its properties, including, but not limited to, correctness * and/or fitness for purpose. * --------------------------------------------------------------------------- */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" struct cryptocteon_softc { int32_t sc_cid; /* opencrypto id */ - struct octo_sess **sc_sessions; - uint32_t sc_sesnum; }; int cryptocteon_debug = 0; TUNABLE_INT("hw.cryptocteon.debug", &cryptocteon_debug); static void cryptocteon_identify(driver_t *, device_t); static int cryptocteon_probe(device_t); static int cryptocteon_attach(device_t); static int cryptocteon_process(device_t, struct cryptop *, int); -static int cryptocteon_newsession(device_t, u_int32_t *, struct cryptoini *); -static int cryptocteon_freesession(device_t, u_int64_t); +static int cryptocteon_newsession(device_t, crypto_session_t, struct cryptoini *); static void cryptocteon_identify(driver_t *drv, device_t parent) { if (octeon_has_feature(OCTEON_FEATURE_CRYPTO)) BUS_ADD_CHILD(parent, 0, "cryptocteon", 0); } static int cryptocteon_probe(device_t dev) { device_set_desc(dev, "Octeon Secure Coprocessor"); return (0); } static int cryptocteon_attach(device_t dev) { struct cryptocteon_softc *sc; sc = device_get_softc(dev); - sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC); + sc->sc_cid = crypto_get_driverid(dev, sizeof(struct octo_sess), + CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC); if (sc->sc_cid < 0) { device_printf(dev, "crypto_get_driverid ret %d\n", sc->sc_cid); return (ENXIO); } crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); return (0); } /* * Generate a new octo session. We artifically limit it to a single * hash/cipher or hash-cipher combo just to make it easier, most callers * do not expect more than this anyway. */ static int -cryptocteon_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri) +cryptocteon_newsession(device_t dev, crypto_session_t cses, + struct cryptoini *cri) { struct cryptoini *c, *encini = NULL, *macini = NULL; struct cryptocteon_softc *sc; - struct octo_sess **ocd; + struct octo_sess *ocd; int i; sc = device_get_softc(dev); - if (sid == NULL || cri == NULL || sc == NULL) + if (cri == NULL || sc == NULL) return (EINVAL); /* * To keep it simple, we only handle hash, cipher or hash/cipher in a * session, you cannot currently do multiple ciphers/hashes in one * session even though it would be possibel to code this driver to * handle it. 
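/*
 * Illustrative sketch, not part of this change: per the comment above, the
 * largest session this driver accepts is one cipher plus one hash, e.g.
 * AES-CBC chained with HMAC-SHA1.  The caller provides a two-element
 * cryptoini array; keys and key lengths are placeholders.
 */
static void
example_cteon_cri(struct cryptoini *cri, caddr_t enckey, int encklen,
    caddr_t mackey, int macklen)
{
	memset(cri, 0, sizeof(*cri) * 2);
	cri[0].cri_alg = CRYPTO_AES_CBC;
	cri[0].cri_key = enckey;
	cri[0].cri_klen = encklen;	/* in bits */
	cri[0].cri_next = &cri[1];
	cri[1].cri_alg = CRYPTO_SHA1_HMAC;
	cri[1].cri_key = mackey;
	cri[1].cri_klen = macklen;	/* in bits */
	cri[1].cri_next = NULL;
}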
*/ for (i = 0, c = cri; c && i < 2; i++) { if (c->cri_alg == CRYPTO_MD5_HMAC || c->cri_alg == CRYPTO_SHA1_HMAC || c->cri_alg == CRYPTO_NULL_HMAC) { if (macini) { break; } macini = c; } if (c->cri_alg == CRYPTO_DES_CBC || c->cri_alg == CRYPTO_3DES_CBC || c->cri_alg == CRYPTO_AES_CBC || c->cri_alg == CRYPTO_NULL_CBC) { if (encini) { break; } encini = c; } c = c->cri_next; } if (!macini && !encini) { dprintf("%s,%d - EINVAL bad cipher/hash or combination\n", __FILE__, __LINE__); return EINVAL; } if (c) { dprintf("%s,%d - EINVAL cannot handle chained cipher/hash combos\n", __FILE__, __LINE__); return EINVAL; } /* * So we have something we can do, lets setup the session */ + ocd = crypto_get_driver_session(cses); - if (sc->sc_sessions) { - for (i = 1; i < sc->sc_sesnum; i++) - if (sc->sc_sessions[i] == NULL) - break; - } else - i = 1; /* NB: to silence compiler warning */ - - if (sc->sc_sessions == NULL || i == sc->sc_sesnum) { - if (sc->sc_sessions == NULL) { - i = 1; /* We leave sc->sc_sessions[0] empty */ - sc->sc_sesnum = CRYPTO_SW_SESSIONS; - } else - sc->sc_sesnum *= 2; - - ocd = malloc(sc->sc_sesnum * sizeof(struct octo_sess *), - M_DEVBUF, M_NOWAIT | M_ZERO); - if (ocd == NULL) { - /* Reset session number */ - if (sc->sc_sesnum == CRYPTO_SW_SESSIONS) - sc->sc_sesnum = 0; - else - sc->sc_sesnum /= 2; - dprintf("%s,%d: ENOBUFS\n", __FILE__, __LINE__); - return ENOBUFS; - } - - /* Copy existing sessions */ - if (sc->sc_sessions) { - memcpy(ocd, sc->sc_sessions, - (sc->sc_sesnum / 2) * sizeof(struct octo_sess *)); - free(sc->sc_sessions, M_DEVBUF); - } - - sc->sc_sessions = ocd; - } - - ocd = &sc->sc_sessions[i]; - *sid = i; - - *ocd = malloc(sizeof(struct octo_sess), M_DEVBUF, M_NOWAIT | M_ZERO); - if (*ocd == NULL) { - cryptocteon_freesession(NULL, i); - dprintf("%s,%d: ENOBUFS\n", __FILE__, __LINE__); - return ENOBUFS; - } - if (encini && encini->cri_key) { - (*ocd)->octo_encklen = (encini->cri_klen + 7) / 8; - memcpy((*ocd)->octo_enckey, encini->cri_key, (*ocd)->octo_encklen); + ocd->octo_encklen = (encini->cri_klen + 7) / 8; + memcpy(ocd->octo_enckey, encini->cri_key, ocd->octo_encklen); } if (macini && macini->cri_key) { - (*ocd)->octo_macklen = (macini->cri_klen + 7) / 8; - memcpy((*ocd)->octo_mackey, macini->cri_key, (*ocd)->octo_macklen); + ocd->octo_macklen = (macini->cri_klen + 7) / 8; + memcpy(ocd->octo_mackey, macini->cri_key, ocd->octo_macklen); } - (*ocd)->octo_mlen = 0; + ocd->octo_mlen = 0; if (encini && encini->cri_mlen) - (*ocd)->octo_mlen = encini->cri_mlen; + ocd->octo_mlen = encini->cri_mlen; else if (macini && macini->cri_mlen) - (*ocd)->octo_mlen = macini->cri_mlen; + ocd->octo_mlen = macini->cri_mlen; else - (*ocd)->octo_mlen = 12; + ocd->octo_mlen = 12; /* * point c at the enc if it exists, otherwise the mac */ c = encini ? encini : macini; switch (c->cri_alg) { case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: - (*ocd)->octo_ivsize = 8; + ocd->octo_ivsize = 8; switch (macini ? 
macini->cri_alg : -1) { case CRYPTO_MD5_HMAC: - (*ocd)->octo_encrypt = octo_des_cbc_md5_encrypt; - (*ocd)->octo_decrypt = octo_des_cbc_md5_decrypt; - octo_calc_hash(0, macini->cri_key, (*ocd)->octo_hminner, - (*ocd)->octo_hmouter); + ocd->octo_encrypt = octo_des_cbc_md5_encrypt; + ocd->octo_decrypt = octo_des_cbc_md5_decrypt; + octo_calc_hash(0, macini->cri_key, ocd->octo_hminner, + ocd->octo_hmouter); break; case CRYPTO_SHA1_HMAC: - (*ocd)->octo_encrypt = octo_des_cbc_sha1_encrypt; - (*ocd)->octo_decrypt = octo_des_cbc_sha1_encrypt; - octo_calc_hash(1, macini->cri_key, (*ocd)->octo_hminner, - (*ocd)->octo_hmouter); + ocd->octo_encrypt = octo_des_cbc_sha1_encrypt; + ocd->octo_decrypt = octo_des_cbc_sha1_encrypt; + octo_calc_hash(1, macini->cri_key, ocd->octo_hminner, + ocd->octo_hmouter); break; case -1: - (*ocd)->octo_encrypt = octo_des_cbc_encrypt; - (*ocd)->octo_decrypt = octo_des_cbc_decrypt; + ocd->octo_encrypt = octo_des_cbc_encrypt; + ocd->octo_decrypt = octo_des_cbc_decrypt; break; default: - cryptocteon_freesession(NULL, i); dprintf("%s,%d: EINVALn", __FILE__, __LINE__); return EINVAL; } break; case CRYPTO_AES_CBC: - (*ocd)->octo_ivsize = 16; + ocd->octo_ivsize = 16; switch (macini ? macini->cri_alg : -1) { case CRYPTO_MD5_HMAC: - (*ocd)->octo_encrypt = octo_aes_cbc_md5_encrypt; - (*ocd)->octo_decrypt = octo_aes_cbc_md5_decrypt; - octo_calc_hash(0, macini->cri_key, (*ocd)->octo_hminner, - (*ocd)->octo_hmouter); + ocd->octo_encrypt = octo_aes_cbc_md5_encrypt; + ocd->octo_decrypt = octo_aes_cbc_md5_decrypt; + octo_calc_hash(0, macini->cri_key, ocd->octo_hminner, + ocd->octo_hmouter); break; case CRYPTO_SHA1_HMAC: - (*ocd)->octo_encrypt = octo_aes_cbc_sha1_encrypt; - (*ocd)->octo_decrypt = octo_aes_cbc_sha1_decrypt; - octo_calc_hash(1, macini->cri_key, (*ocd)->octo_hminner, - (*ocd)->octo_hmouter); + ocd->octo_encrypt = octo_aes_cbc_sha1_encrypt; + ocd->octo_decrypt = octo_aes_cbc_sha1_decrypt; + octo_calc_hash(1, macini->cri_key, ocd->octo_hminner, + ocd->octo_hmouter); break; case -1: - (*ocd)->octo_encrypt = octo_aes_cbc_encrypt; - (*ocd)->octo_decrypt = octo_aes_cbc_decrypt; + ocd->octo_encrypt = octo_aes_cbc_encrypt; + ocd->octo_decrypt = octo_aes_cbc_decrypt; break; default: - cryptocteon_freesession(NULL, i); dprintf("%s,%d: EINVALn", __FILE__, __LINE__); return EINVAL; } break; case CRYPTO_MD5_HMAC: - (*ocd)->octo_encrypt = octo_null_md5_encrypt; - (*ocd)->octo_decrypt = octo_null_md5_encrypt; - octo_calc_hash(0, macini->cri_key, (*ocd)->octo_hminner, - (*ocd)->octo_hmouter); + ocd->octo_encrypt = octo_null_md5_encrypt; + ocd->octo_decrypt = octo_null_md5_encrypt; + octo_calc_hash(0, macini->cri_key, ocd->octo_hminner, + ocd->octo_hmouter); break; case CRYPTO_SHA1_HMAC: - (*ocd)->octo_encrypt = octo_null_sha1_encrypt; - (*ocd)->octo_decrypt = octo_null_sha1_encrypt; - octo_calc_hash(1, macini->cri_key, (*ocd)->octo_hminner, - (*ocd)->octo_hmouter); + ocd->octo_encrypt = octo_null_sha1_encrypt; + ocd->octo_decrypt = octo_null_sha1_encrypt; + octo_calc_hash(1, macini->cri_key, ocd->octo_hminner, + ocd->octo_hmouter); break; default: - cryptocteon_freesession(NULL, i); dprintf("%s,%d: EINVALn", __FILE__, __LINE__); return EINVAL; } - (*ocd)->octo_encalg = encini ? encini->cri_alg : -1; - (*ocd)->octo_macalg = macini ? macini->cri_alg : -1; + ocd->octo_encalg = encini ? encini->cri_alg : -1; + ocd->octo_macalg = macini ? macini->cri_alg : -1; - return 0; + return (0); } /* - * Free a session. 
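/*
 * Illustrative sketch, not part of this change: the driver-side pattern
 * introduced here, condensed from the cryptocteon conversion above.  The
 * framework now allocates per-session storage of the size passed to
 * crypto_get_driverid() at attach time, newsession simply fills in the
 * structure returned by crypto_get_driver_session(), and neither a
 * freesession method nor a driver-private session table is needed.
 * "struct example_sess" stands in for the driver's real session state.
 */
struct example_sess {
	int	alg;
};

static int
example_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
{
	struct example_sess *es;

	/* Storage sized by crypto_get_driverid(dev, sizeof(*es), flags). */
	es = crypto_get_driver_session(cses);
	es->alg = cri->cri_alg;
	return (0);
}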
- */ -static int -cryptocteon_freesession(device_t dev, u_int64_t tid) -{ - struct cryptocteon_softc *sc; - u_int32_t sid = CRYPTO_SESID2LID(tid); - - sc = device_get_softc(dev); - - if (sc == NULL) - return (EINVAL); - - if (sid > sc->sc_sesnum || sc->sc_sessions == NULL || - sc->sc_sessions[sid] == NULL) - return (EINVAL); - - /* Silently accept and return */ - if (sid == 0) - return(0); - - if (sc->sc_sessions[sid]) - free(sc->sc_sessions[sid], M_DEVBUF); - sc->sc_sessions[sid] = NULL; - return 0; -} - -/* * Process a request. */ static int cryptocteon_process(device_t dev, struct cryptop *crp, int hint) { struct cryptodesc *crd; struct octo_sess *od; - u_int32_t lid; size_t iovcnt, iovlen; struct mbuf *m = NULL; struct uio *uiop = NULL; struct cryptodesc *enccrd = NULL, *maccrd = NULL; unsigned char *ivp = NULL; unsigned char iv_data[HASH_MAX_LEN]; int auth_off = 0, auth_len = 0, crypt_off = 0, crypt_len = 0, icv_off = 0; struct cryptocteon_softc *sc; sc = device_get_softc(dev); if (sc == NULL || crp == NULL) return EINVAL; crp->crp_etype = 0; if (crp->crp_desc == NULL || crp->crp_buf == NULL) { dprintf("%s,%d: EINVAL\n", __FILE__, __LINE__); crp->crp_etype = EINVAL; goto done; } - lid = crp->crp_sid & 0xffffffff; - if (lid >= sc->sc_sesnum || lid == 0 || sc->sc_sessions == NULL || - sc->sc_sessions[lid] == NULL) { - crp->crp_etype = ENOENT; - dprintf("%s,%d: ENOENT\n", __FILE__, __LINE__); - goto done; - } - od = sc->sc_sessions[lid]; + od = crypto_get_driver_session(crp->crp_session); /* * do some error checking outside of the loop for m and IOV processing * this leaves us with valid m or uiop pointers for later */ if (crp->crp_flags & CRYPTO_F_IMBUF) { unsigned frags; m = (struct mbuf *) crp->crp_buf; for (frags = 0; m != NULL; frags++) m = m->m_next; if (frags >= UIO_MAXIOV) { printf("%s,%d: %d frags > UIO_MAXIOV", __FILE__, __LINE__, frags); goto done; } m = (struct mbuf *) crp->crp_buf; } else if (crp->crp_flags & CRYPTO_F_IOV) { uiop = (struct uio *) crp->crp_buf; if (uiop->uio_iovcnt > UIO_MAXIOV) { printf("%s,%d: %d uio_iovcnt > UIO_MAXIOV", __FILE__, __LINE__, uiop->uio_iovcnt); goto done; } } /* point our enccrd and maccrd appropriately */ crd = crp->crp_desc; - if (crd->crd_alg == od->octo_encalg) enccrd = crd; - if (crd->crd_alg == od->octo_macalg) maccrd = crd; + if (crd->crd_alg == od->octo_encalg) + enccrd = crd; + if (crd->crd_alg == od->octo_macalg) + maccrd = crd; crd = crd->crd_next; if (crd) { - if (crd->crd_alg == od->octo_encalg) enccrd = crd; - if (crd->crd_alg == od->octo_macalg) maccrd = crd; + if (crd->crd_alg == od->octo_encalg) + enccrd = crd; + if (crd->crd_alg == od->octo_macalg) + maccrd = crd; crd = crd->crd_next; } if (crd) { crp->crp_etype = EINVAL; dprintf("%s,%d: ENOENT - descriptors do not match session\n", __FILE__, __LINE__); goto done; } if (enccrd) { if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) { ivp = enccrd->crd_iv; } else { ivp = iv_data; crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, od->octo_ivsize, (caddr_t) ivp); } if (maccrd) { auth_off = maccrd->crd_skip; auth_len = maccrd->crd_len; icv_off = maccrd->crd_inject; } crypt_off = enccrd->crd_skip; crypt_len = enccrd->crd_len; } else { /* if (maccrd) */ auth_off = maccrd->crd_skip; auth_len = maccrd->crd_len; icv_off = maccrd->crd_inject; } /* * setup the I/O vector to cover the buffer */ if (crp->crp_flags & CRYPTO_F_IMBUF) { iovcnt = 0; iovlen = 0; while (m != NULL) { od->octo_iov[iovcnt].iov_base = mtod(m, void *); od->octo_iov[iovcnt].iov_len = m->m_len; m = m->m_next; 
iovlen += od->octo_iov[iovcnt++].iov_len; } } else if (crp->crp_flags & CRYPTO_F_IOV) { iovlen = 0; for (iovcnt = 0; iovcnt < uiop->uio_iovcnt; iovcnt++) { od->octo_iov[iovcnt].iov_base = uiop->uio_iov[iovcnt].iov_base; od->octo_iov[iovcnt].iov_len = uiop->uio_iov[iovcnt].iov_len; iovlen += od->octo_iov[iovcnt].iov_len; } } else { iovlen = crp->crp_ilen; od->octo_iov[0].iov_base = crp->crp_buf; od->octo_iov[0].iov_len = crp->crp_ilen; iovcnt = 1; } /* * setup a new explicit key */ if (enccrd) { if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) { od->octo_encklen = (enccrd->crd_klen + 7) / 8; memcpy(od->octo_enckey, enccrd->crd_key, od->octo_encklen); } } if (maccrd) { if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) { od->octo_macklen = (maccrd->crd_klen + 7) / 8; memcpy(od->octo_mackey, maccrd->crd_key, od->octo_macklen); od->octo_mackey_set = 0; } if (!od->octo_mackey_set) { octo_calc_hash(maccrd->crd_alg == CRYPTO_MD5_HMAC ? 0 : 1, maccrd->crd_key, od->octo_hminner, od->octo_hmouter); od->octo_mackey_set = 1; } } if (!enccrd || (enccrd->crd_flags & CRD_F_ENCRYPT)) (*od->octo_encrypt)(od, od->octo_iov, iovcnt, iovlen, auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); else (*od->octo_decrypt)(od, od->octo_iov, iovcnt, iovlen, auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp); done: crypto_done(crp); - return 0; + return (0); } static device_method_t cryptocteon_methods[] = { /* device methods */ DEVMETHOD(device_identify, cryptocteon_identify), DEVMETHOD(device_probe, cryptocteon_probe), DEVMETHOD(device_attach, cryptocteon_attach), /* crypto device methods */ DEVMETHOD(cryptodev_newsession, cryptocteon_newsession), - DEVMETHOD(cryptodev_freesession,cryptocteon_freesession), DEVMETHOD(cryptodev_process, cryptocteon_process), { 0, 0 } }; static driver_t cryptocteon_driver = { "cryptocteon", cryptocteon_methods, sizeof (struct cryptocteon_softc), }; static devclass_t cryptocteon_devclass; DRIVER_MODULE(cryptocteon, nexus, cryptocteon_driver, cryptocteon_devclass, 0, 0); Index: head/sys/mips/nlm/dev/sec/nlmrsa.c =================================================================== --- head/sys/mips/nlm/dev/sec/nlmrsa.c (revision 336438) +++ head/sys/mips/nlm/dev/sec/nlmrsa.c (revision 336439) @@ -1,557 +1,500 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2012 Broadcom Corporation * All Rights Reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef NLM_RSA_DEBUG static void print_krp_params(struct cryptkop *krp); #endif static int xlp_rsa_init(struct xlp_rsa_softc *sc, int node); -static int xlp_rsa_newsession(device_t , uint32_t *, struct cryptoini *); -static int xlp_rsa_freesession(device_t , uint64_t); +static int xlp_rsa_newsession(device_t , crypto_session_t, struct cryptoini *); static int xlp_rsa_kprocess(device_t , struct cryptkop *, int); static int xlp_get_rsa_opsize(struct xlp_rsa_command *cmd, unsigned int bits); static void xlp_free_cmd_params(struct xlp_rsa_command *cmd); static int xlp_rsa_inp2hwformat(uint8_t *src, uint8_t *dst, uint32_t paramsize, uint8_t result); static int xlp_rsa_probe(device_t); static int xlp_rsa_attach(device_t); static int xlp_rsa_detach(device_t); static device_method_t xlp_rsa_methods[] = { /* device interface */ DEVMETHOD(device_probe, xlp_rsa_probe), DEVMETHOD(device_attach, xlp_rsa_attach), DEVMETHOD(device_detach, xlp_rsa_detach), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* crypto device methods */ DEVMETHOD(cryptodev_newsession, xlp_rsa_newsession), - DEVMETHOD(cryptodev_freesession, xlp_rsa_freesession), DEVMETHOD(cryptodev_kprocess, xlp_rsa_kprocess), DEVMETHOD_END }; static driver_t xlp_rsa_driver = { "nlmrsa", xlp_rsa_methods, sizeof(struct xlp_rsa_softc) }; static devclass_t xlp_rsa_devclass; DRIVER_MODULE(nlmrsa, pci, xlp_rsa_driver, xlp_rsa_devclass, 0, 0); MODULE_DEPEND(nlmrsa, crypto, 1, 1, 1); #ifdef NLM_RSA_DEBUG static void print_krp_params(struct cryptkop *krp) { int i; printf("krp->krp_op :%d\n", krp->krp_op); printf("krp->krp_status :%d\n", krp->krp_status); printf("krp->krp_iparams:%d\n", krp->krp_iparams); printf("krp->krp_oparams:%d\n", krp->krp_oparams); for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) { printf("krp->krp_param[%d].crp_p :0x%llx\n", i, (unsigned long long)krp->krp_param[i].crp_p); printf("krp->krp_param[%d].crp_nbits :%d\n", i, krp->krp_param[i].crp_nbits); printf("krp->krp_param[%d].crp_nbytes :%d\n", i, howmany(krp->krp_param[i].crp_nbits, 8)); } } #endif static int xlp_rsa_init(struct xlp_rsa_softc *sc, int node) { struct xlp_rsa_command *cmd = NULL; uint32_t fbvc, dstvc, endsel, regval; struct nlm_fmn_msg m; int err, ret, i; uint64_t base; /* Register interrupt handler for the RSA/ECC CMS messages */ if (register_msgring_handler(sc->rsaecc_vc_start, sc->rsaecc_vc_end, nlm_xlprsaecc_msgring_handler, sc) != 0) { err = -1; printf("Couldn't register rsa/ecc msgring handler\n"); goto errout; } fbvc = nlm_cpuid() * 4 + XLPGE_FB_VC; /* Do the CMS credit initialization */ /* Currently it is 
configured by default to 50 when kernel comes up */ #if BYTE_ORDER == LITTLE_ENDIAN for (i = 0; i < nitems(nlm_rsa_ucode_data); i++) nlm_rsa_ucode_data[i] = htobe64(nlm_rsa_ucode_data[i]); #endif for (dstvc = sc->rsaecc_vc_start; dstvc <= sc->rsaecc_vc_end; dstvc++) { cmd = malloc(sizeof(struct xlp_rsa_command), M_DEVBUF, M_NOWAIT | M_ZERO); KASSERT(cmd != NULL, ("%s:cmd is NULL\n", __func__)); cmd->rsasrc = contigmalloc(sizeof(nlm_rsa_ucode_data), M_DEVBUF, (M_WAITOK | M_ZERO), 0UL /* low address */, -1UL /* high address */, XLP_L2L3_CACHELINE_SIZE /* alignment */, 0UL /* boundary */); KASSERT(cmd->rsasrc != NULL, ("%s:cmd->rsasrc is NULL\n", __func__)); memcpy(cmd->rsasrc, nlm_rsa_ucode_data, sizeof(nlm_rsa_ucode_data)); m.msg[0] = nlm_crypto_form_rsa_ecc_fmn_entry0(1, 0x70, 0, vtophys(cmd->rsasrc)); m.msg[1] = nlm_crypto_form_rsa_ecc_fmn_entry1(0, 1, fbvc, vtophys(cmd->rsasrc)); /* Software scratch pad */ m.msg[2] = (uintptr_t)cmd; m.msg[3] = 0; ret = nlm_fmn_msgsend(dstvc, 3, FMN_SWCODE_RSA, &m); if (ret != 0) { err = -1; printf("%s: msgsnd failed (%x)\n", __func__, ret); goto errout; } } /* Configure so that all VCs send request to all RSA pipes */ base = nlm_get_rsa_regbase(node); if (nlm_is_xlp3xx()) { endsel = 1; regval = 0xFFFF; } else { endsel = 3; regval = 0x07FFFFFF; } for (i = 0; i < endsel; i++) nlm_write_rsa_reg(base, RSA_ENG_SEL_0 + i, regval); return (0); errout: xlp_free_cmd_params(cmd); return (err); } /* This function is called from an interrupt handler */ void nlm_xlprsaecc_msgring_handler(int vc, int size, int code, int src_id, struct nlm_fmn_msg *msg, void *data) { struct xlp_rsa_command *cmd; struct xlp_rsa_softc *sc; struct crparam *outparam; int ostart; KASSERT(code == FMN_SWCODE_RSA, ("%s: bad code = %d, expected code = %d\n", __func__, code, FMN_SWCODE_RSA)); sc = data; KASSERT(src_id >= sc->rsaecc_vc_start && src_id <= sc->rsaecc_vc_end, ("%s: bad src_id = %d, expect %d - %d\n", __func__, src_id, sc->rsaecc_vc_start, sc->rsaecc_vc_end)); cmd = (struct xlp_rsa_command *)(uintptr_t)msg->msg[1]; KASSERT(cmd != NULL, ("%s:cmd not received properly\n", __func__)); if (RSA_ERROR(msg->msg[0]) != 0) { printf("%s: Message rcv msg0 %llx msg1 %llx err %x \n", __func__, (unsigned long long)msg->msg[0], (unsigned long long)msg->msg[1], (int)RSA_ERROR(msg->msg[0])); cmd->krp->krp_status = EBADMSG; } if (cmd->krp != NULL) { ostart = cmd->krp->krp_iparams; outparam = &cmd->krp->krp_param[ostart]; xlp_rsa_inp2hwformat(cmd->rsasrc + cmd->rsaopsize * ostart, outparam->crp_p, howmany(outparam->crp_nbits, 8), 1); crypto_kdone(cmd->krp); } xlp_free_cmd_params(cmd); } static int xlp_rsa_probe(device_t dev) { struct xlp_rsa_softc *sc; if (pci_get_vendor(dev) == PCI_VENDOR_NETLOGIC && pci_get_device(dev) == PCI_DEVICE_ID_NLM_RSA) { sc = device_get_softc(dev); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Attach an interface that successfully probed. 
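The nlmrsa device only services sessionless key operations (it registers CRK_MOD_EXP in the attach routine that follows), which is why its newsession can shrink to a bare argument check and its freesession can disappear. For context, this is roughly how a consumer drives the cryptodev_kprocess path the driver implements; the modexp_* names and the M_TEMP malloc type are illustrative only, and the headers from the earlier sketch (plus <sys/malloc.h>) are assumed.

static int
modexp_done(struct cryptkop *krp)
{
	if (krp->krp_status != 0)
		printf("mod_exp failed: %d\n", krp->krp_status);
	/* The result was written to krp->krp_param[krp->krp_iparams].crp_p. */
	free(krp, M_TEMP);
	return (0);
}

static int
modexp_submit(struct crparam *base, struct crparam *exp,
    struct crparam *mod, struct crparam *res)
{
	struct cryptkop *krp;

	krp = malloc(sizeof(*krp), M_TEMP, M_NOWAIT | M_ZERO);
	if (krp == NULL)
		return (ENOMEM);
	krp->krp_op = CRK_MOD_EXP;
	krp->krp_iparams = 3;		/* base, exponent, modulus */
	krp->krp_oparams = 1;		/* one output buffer */
	krp->krp_param[0] = *base;
	krp->krp_param[1] = *exp;
	krp->krp_param[2] = *mod;
	krp->krp_param[3] = *res;	/* a^e mod n lands here */
	krp->krp_callback = modexp_done;
	return (crypto_kdispatch(krp));
}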
*/ static int xlp_rsa_attach(device_t dev) { struct xlp_rsa_softc *sc = device_get_softc(dev); uint64_t base; int qstart, qnum; int freq, node; sc->sc_dev = dev; node = nlm_get_device_node(pci_get_slot(dev)); freq = nlm_set_device_frequency(node, DFS_DEVICE_RSA, 250); if (bootverbose) device_printf(dev, "RSA Freq: %dMHz\n", freq); if (pci_get_device(dev) == PCI_DEVICE_ID_NLM_RSA) { device_set_desc(dev, "XLP RSA/ECC Accelerator"); - if ((sc->sc_cid = crypto_get_driverid(dev, - CRYPTOCAP_F_HARDWARE)) < 0) { + sc->sc_cid = crypto_get_driverid(dev, + sizeof(struct xlp_rsa_session), CRYPTOCAP_F_HARDWARE); + if (sc->sc_cid < 0) { printf("xlp_rsaecc-err:couldn't get the driver id\n"); goto error_exit; } if (crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0) != 0) goto error_exit; base = nlm_get_rsa_pcibase(node); qstart = nlm_qidstart(base); qnum = nlm_qnum(base); sc->rsaecc_vc_start = qstart; sc->rsaecc_vc_end = qstart + qnum - 1; } if (xlp_rsa_init(sc, node) != 0) goto error_exit; device_printf(dev, "RSA Initialization complete!\n"); return (0); error_exit: return (ENXIO); } /* * Detach an interface that successfully probed. */ static int xlp_rsa_detach(device_t dev) { return (0); } /* - * Allocate a new 'session' and return an encoded session id. 'sidp' - * contains our registration id, and should contain an encoded session - * id on successful allocation. + * Allocate a new 'session' (unused). */ static int -xlp_rsa_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri) +xlp_rsa_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri) { struct xlp_rsa_softc *sc = device_get_softc(dev); - struct xlp_rsa_session *ses = NULL; - int sesn; - if (sidp == NULL || cri == NULL || sc == NULL) + if (cri == NULL || sc == NULL) return (EINVAL); - if (sc->sc_sessions == NULL) { - ses = sc->sc_sessions = malloc(sizeof(struct xlp_rsa_session), - M_DEVBUF, M_NOWAIT); - if (ses == NULL) - return (ENOMEM); - sesn = 0; - sc->sc_nsessions = 1; - } else { - for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { - if (!sc->sc_sessions[sesn].hs_used) { - ses = &sc->sc_sessions[sesn]; - break; - } - } - - if (ses == NULL) { - sesn = sc->sc_nsessions; - ses = malloc((sesn + 1) * sizeof(*ses), - M_DEVBUF, M_NOWAIT); - if (ses == NULL) - return (ENOMEM); - bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses)); - bzero(sc->sc_sessions, sesn * sizeof(*ses)); - free(sc->sc_sessions, M_DEVBUF); - sc->sc_sessions = ses; - ses = &sc->sc_sessions[sesn]; - sc->sc_nsessions++; - } - } - bzero(ses, sizeof(*ses)); - ses->sessionid = sesn; - ses->hs_used = 1; - - *sidp = XLP_RSA_SID(device_get_unit(sc->sc_dev), sesn); return (0); } /* - * Deallocate a session. - * XXX this routine should run a zero'd mac/encrypt key into context ram. + * XXX freesession should run a zero'd mac/encrypt key into context ram. * XXX to blow away any keys already stored there. 
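The xlp_rsa_freesession() removed just below is dropped outright rather than converted: once the framework owns the session memory, a driver only needs a cryptodev_freesession method when it has cleanup of its own to do, for instance the zeroed-key write the XXX comment above asks for. A hedged sketch of what such a converted method could look like; the mydrv_* names, the explicit_bzero() policy and the assumption that the method keeps its int return type are mine, not the patch's.

static int
mydrv_freesession(device_t dev, crypto_session_t cses)
{
	struct mydrv_session *ses = crypto_get_driver_session(cses);

	/* Wipe cached key material before the framework frees the session. */
	explicit_bzero(ses->key, sizeof(ses->key));
	/* ... and, ideally, push a zeroed key into context RAM as well ... */
	return (0);
}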
*/ -static int -xlp_rsa_freesession(device_t dev, u_int64_t tid) -{ - struct xlp_rsa_softc *sc = device_get_softc(dev); - int session; - u_int32_t sid = CRYPTO_SESID2LID(tid); - - if (sc == NULL) - return (EINVAL); - - session = XLP_RSA_SESSION(sid); - if (session >= sc->sc_nsessions) - return (EINVAL); - - sc->sc_sessions[session].hs_used = 0; - return (0); -} static void xlp_free_cmd_params(struct xlp_rsa_command *cmd) { if (cmd == NULL) return; if (cmd->rsasrc != NULL) { if (cmd->krp == NULL) /* Micro code load */ contigfree(cmd->rsasrc, sizeof(nlm_rsa_ucode_data), M_DEVBUF); else free(cmd->rsasrc, M_DEVBUF); } free(cmd, M_DEVBUF); } static int xlp_get_rsa_opsize(struct xlp_rsa_command *cmd, unsigned int bits) { if (bits == 0 || bits > 8192) return (-1); /* XLP hardware expects always a fixed size with unused bytes * zeroed out in the input data */ if (bits <= 512) { cmd->rsatype = 0x40; cmd->rsaopsize = 64; } else if (bits <= 1024) { cmd->rsatype = 0x41; cmd->rsaopsize = 128; } else if (bits <= 2048) { cmd->rsatype = 0x42; cmd->rsaopsize = 256; } else if (bits <= 4096) { cmd->rsatype = 0x43; cmd->rsaopsize = 512; } else if (bits <= 8192) { cmd->rsatype = 0x44; cmd->rsaopsize = 1024; } return (0); } static int xlp_rsa_inp2hwformat(uint8_t *src, uint8_t *dst, uint32_t paramsize, uint8_t result) { uint32_t pdwords, pbytes; int i, j, k; pdwords = paramsize / 8; pbytes = paramsize % 8; for (i = 0, k = 0; i < pdwords; i++) { /* copy dwords of inp/hw to hw/out format */ for (j = 7; j >= 0; j--, k++) dst[i * 8 + j] = src[k]; } if (pbytes) { if (result == 0) { /* copy rem bytes of input data to hw format */ for (j = 7; k < paramsize; j--, k++) dst[i * 8 + j] = src[k]; } else { /* copy rem bytes of hw data to exp output format */ for (j = 7; k < paramsize; j--, k++) dst[k] = src[i * 8 + j]; } } return (0); } static int nlm_crypto_complete_rsa_request(struct xlp_rsa_softc *sc, struct xlp_rsa_command *cmd) { unsigned int fbvc; struct nlm_fmn_msg m; int ret; fbvc = nlm_cpuid() * 4 + XLPGE_FB_VC; m.msg[0] = nlm_crypto_form_rsa_ecc_fmn_entry0(1, cmd->rsatype, cmd->rsafn, vtophys(cmd->rsasrc)); m.msg[1] = nlm_crypto_form_rsa_ecc_fmn_entry1(0, 1, fbvc, vtophys(cmd->rsasrc + cmd->rsaopsize * cmd->krp->krp_iparams)); /* Software scratch pad */ m.msg[2] = (uintptr_t)cmd; m.msg[3] = 0; /* Send the message to rsa engine vc */ ret = nlm_fmn_msgsend(sc->rsaecc_vc_start, 3, FMN_SWCODE_RSA, &m); if (ret != 0) { #ifdef NLM_SEC_DEBUG printf("%s: msgsnd failed (%x)\n", __func__, ret); #endif return (ERESTART); } return (0); } static int xlp_rsa_kprocess(device_t dev, struct cryptkop *krp, int hint) { struct xlp_rsa_softc *sc = device_get_softc(dev); struct xlp_rsa_command *cmd; struct crparam *kp; int err, i; if (krp == NULL || krp->krp_callback == NULL) return (EINVAL); cmd = malloc(sizeof(struct xlp_rsa_command), M_DEVBUF, M_NOWAIT | M_ZERO); KASSERT(cmd != NULL, ("%s:cmd is NULL\n", __func__)); cmd->krp = krp; #ifdef NLM_RSA_DEBUG print_krp_params(krp); #endif err = EOPNOTSUPP; switch (krp->krp_op) { case CRK_MOD_EXP: if (krp->krp_iparams == 3 && krp->krp_oparams == 1) break; goto errout; default: device_printf(dev, "Op:%d not yet supported\n", krp->krp_op); goto errout; } err = xlp_get_rsa_opsize(cmd, krp->krp_param[krp->krp_iparams - 1].crp_nbits); if (err != 0) { err = EINVAL; goto errout; } cmd->rsafn = 0; /* Mod Exp */ cmd->rsasrc = malloc( cmd->rsaopsize * (krp->krp_iparams + krp->krp_oparams), M_DEVBUF, M_NOWAIT | M_ZERO); if (cmd->rsasrc == NULL) { err = ENOMEM; goto errout; } for (i = 0, kp = 
krp->krp_param; i < krp->krp_iparams; i++, kp++) { KASSERT(kp->crp_nbits != 0, ("%s: parameter[%d]'s length is zero\n", __func__, i)); xlp_rsa_inp2hwformat(kp->crp_p, cmd->rsasrc + i * cmd->rsaopsize, howmany(kp->crp_nbits, 8), 0); } err = nlm_crypto_complete_rsa_request(sc, cmd); if (err != 0) goto errout; return (0); errout: xlp_free_cmd_params(cmd); krp->krp_status = err; crypto_kdone(krp); return (err); } Index: head/sys/mips/nlm/dev/sec/nlmrsalib.h =================================================================== --- head/sys/mips/nlm/dev/sec/nlmrsalib.h (revision 336438) +++ head/sys/mips/nlm/dev/sec/nlmrsalib.h (revision 336439) @@ -1,72 +1,64 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2012 Broadcom Corporation * All Rights Reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _NLMRSALIB_H_ #define _NLMRSALIB_H_ -#define XLP_RSA_SESSION(sid) ((sid) & 0x000007ff) -#define XLP_RSA_SID(crd,ses) (((crd) << 28) | ((ses) & 0x7ff)) - #define RSA_ERROR(msg0) (((msg0) >> 53) & 0x1f) struct xlp_rsa_session { - uint32_t sessionid; - int hs_used; }; struct xlp_rsa_command { - uint16_t session_num; struct xlp_rsa_session *ses; struct cryptkop *krp; uint8_t *rsasrc; uint32_t rsaopsize; uint32_t rsatype; uint32_t rsafn; }; /* * Holds data specific to nlm security accelerators */ struct xlp_rsa_softc { device_t sc_dev; /* device backpointer */ uint64_t rsa_base; int sc_cid; - struct xlp_rsa_session *sc_sessions; - int sc_nsessions; int rsaecc_vc_start; int rsaecc_vc_end; }; void nlm_xlprsaecc_msgring_handler(int vc, int size, int code, int src_id, struct nlm_fmn_msg *msg, void *data); #endif /* _NLMRSALIB_H_ */ Index: head/sys/mips/nlm/dev/sec/nlmsec.c =================================================================== --- head/sys/mips/nlm/dev/sec/nlmsec.c (revision 336438) +++ head/sys/mips/nlm/dev/sec/nlmsec.c (revision 336439) @@ -1,852 +1,792 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2012 Broadcom Corporation * All Rights Reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #include #include #include #include #include #include #include #include #include #include unsigned int creditleft; void xlp_sec_print_data(struct cryptop *crp); static int xlp_sec_init(struct xlp_sec_softc *sc); -static int xlp_sec_newsession(device_t , uint32_t *, struct cryptoini *); -static int xlp_sec_freesession(device_t , uint64_t); +static int xlp_sec_newsession(device_t , crypto_session_t, struct cryptoini *); static int xlp_sec_process(device_t , struct cryptop *, int); static int xlp_copyiv(struct xlp_sec_softc *, struct xlp_sec_command *, struct cryptodesc *enccrd); static int xlp_get_nsegs(struct cryptop *, unsigned int *); static int xlp_alloc_cmd_params(struct xlp_sec_command *, unsigned int); static void xlp_free_cmd_params(struct xlp_sec_command *); static int xlp_sec_probe(device_t); static int xlp_sec_attach(device_t); static int xlp_sec_detach(device_t); static device_method_t xlp_sec_methods[] = { /* device interface */ DEVMETHOD(device_probe, xlp_sec_probe), DEVMETHOD(device_attach, xlp_sec_attach), DEVMETHOD(device_detach, xlp_sec_detach), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* crypto device methods */ DEVMETHOD(cryptodev_newsession, xlp_sec_newsession), - DEVMETHOD(cryptodev_freesession,xlp_sec_freesession), DEVMETHOD(cryptodev_process, xlp_sec_process), DEVMETHOD_END }; static driver_t xlp_sec_driver = { "nlmsec", xlp_sec_methods, sizeof(struct xlp_sec_softc) }; static devclass_t xlp_sec_devclass; DRIVER_MODULE(nlmsec, pci, xlp_sec_driver, xlp_sec_devclass, 0, 0); MODULE_DEPEND(nlmsec, crypto, 1, 1, 1); void nlm_xlpsec_msgring_handler(int vc, int size, int code, int src_id, struct nlm_fmn_msg *msg, void *data); #ifdef NLM_SEC_DEBUG #define extract_bits(x, bitshift, bitcnt) \ (((unsigned long long)x >> bitshift) & ((1ULL << bitcnt) - 1)) void print_crypto_params(struct xlp_sec_command *cmd, struct nlm_fmn_msg m) { unsigned long long msg0,msg1,msg2,msg3,msg4,msg5,msg6,msg7,msg8; msg0 = cmd->ctrlp->desc0; msg1 = cmd->paramp->desc0; msg2 = cmd->paramp->desc1; msg3 = cmd->paramp->desc2; msg4 = cmd->paramp->desc3; msg5 = cmd->paramp->segment[0][0]; msg6 = cmd->paramp->segment[0][1]; msg7 = m.msg[0]; msg8 = m.msg[1]; printf("msg0 %llx msg1 %llx msg2 %llx msg3 %llx msg4 %llx msg5 %llx" "msg6 %llx msg7 %llx 
msg8 %llx\n", msg0, msg1, msg2, msg3, msg4, msg5, msg6, msg7, msg8); printf("c0: hmac %d htype %d hmode %d ctype %d cmode %d arc4 %x\n", (unsigned int)extract_bits(msg0, 61, 1), (unsigned int)extract_bits(msg0, 52, 8), (unsigned int)extract_bits(msg0, 43, 8), (unsigned int)extract_bits(msg0, 34, 8), (unsigned int)extract_bits(msg0, 25, 8), (unsigned int)extract_bits(msg0, 0, 23)); printf("p0: tls %d hsrc %d hl3 %d enc %d ivl %d hd %llx\n", (unsigned int)extract_bits(msg1, 63, 1), (unsigned int)extract_bits(msg1,62,1), (unsigned int)extract_bits(msg1,60,1), (unsigned int)extract_bits(msg1,59,1), (unsigned int)extract_bits(msg1,41,16), extract_bits(msg1,0,40)); printf("p1: clen %u hl %u\n", (unsigned int)extract_bits(msg2, 32, 32), (unsigned int)extract_bits(msg2,0,32)); printf("p2: ivoff %d cbit %d coff %d hbit %d hclb %d hoff %d\n", (unsigned int)extract_bits(msg3, 45, 17), (unsigned int)extract_bits(msg3, 42,3), (unsigned int)extract_bits(msg3, 22,16), (unsigned int)extract_bits(msg3, 19,3), (unsigned int)extract_bits(msg3, 18,1), (unsigned int)extract_bits(msg3, 0, 16)); printf("p3: desfbid %d tlen %d arc4 %x hmacpad %d\n", (unsigned int)extract_bits(msg4, 48,16), (unsigned int)extract_bits(msg4,11,16), (unsigned int)extract_bits(msg4,6,3), (unsigned int)extract_bits(msg4,5,1)); printf("p4: sflen %d sddr %llx \n", (unsigned int)extract_bits(msg5, 48, 16),extract_bits(msg5, 0, 40)); printf("p5: dflen %d cl3 %d cclob %d cdest %llx \n", (unsigned int)extract_bits(msg6, 48, 16), (unsigned int)extract_bits(msg6, 46, 1), (unsigned int)extract_bits(msg6, 41, 1), extract_bits(msg6, 0, 40)); printf("fmn0: fbid %d dfrlen %d dfrv %d cklen %d cdescaddr %llx\n", (unsigned int)extract_bits(msg7, 48, 16), (unsigned int)extract_bits(msg7,46,2), (unsigned int)extract_bits(msg7,45,1), (unsigned int)extract_bits(msg7,40,5), (extract_bits(msg7,0,34)<< 6)); printf("fmn1: arc4 %d hklen %d pdesclen %d pktdescad %llx\n", (unsigned int)extract_bits(msg8, 63, 1), (unsigned int)extract_bits(msg8,56,5), (unsigned int)extract_bits(msg8,43,12), (extract_bits(msg8,0,34) << 6)); return; } void xlp_sec_print_data(struct cryptop *crp) { int i, key_len; struct cryptodesc *crp_desc; - printf("session id = 0x%llx, crp_ilen = %d, crp_olen=%d \n", - crp->crp_sid, crp->crp_ilen, crp->crp_olen); + printf("session = %p, crp_ilen = %d, crp_olen=%d \n", crp->crp_session, + crp->crp_ilen, crp->crp_olen); printf("crp_flags = 0x%x\n", crp->crp_flags); printf("crp buf:\n"); for (i = 0; i < crp->crp_ilen; i++) { printf("%c ", crp->crp_buf[i]); if (i % 10 == 0) printf("\n"); } printf("\n"); printf("****************** desc ****************\n"); crp_desc = crp->crp_desc; printf("crd_skip=%d, crd_len=%d, crd_flags=0x%x, crd_alg=%d\n", crp_desc->crd_skip, crp_desc->crd_len, crp_desc->crd_flags, crp_desc->crd_alg); key_len = crp_desc->crd_klen / 8; printf("key(%d) :\n", key_len); for (i = 0; i < key_len; i++) printf("%d", crp_desc->crd_key[i]); printf("\n"); printf(" IV : \n"); for (i = 0; i < EALG_MAX_BLOCK_LEN; i++) printf("%d", crp_desc->crd_iv[i]); printf("\n"); printf("crd_next=%p\n", crp_desc->crd_next); return; } void print_cmd(struct xlp_sec_command *cmd) { printf("session_num :%d\n",cmd->session_num); printf("crp :0x%x\n",(uint32_t)cmd->crp); printf("enccrd :0x%x\n",(uint32_t)cmd->enccrd); printf("maccrd :0x%x\n",(uint32_t)cmd->maccrd); printf("ses :%d\n",(uint32_t)cmd->ses); printf("ctrlp :0x%x\n",(uint32_t)cmd->ctrlp); printf("paramp :0x%x\n",(uint32_t)cmd->paramp); printf("hashdest :0x%x\n",(uint32_t)cmd->hashdest); 
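/*
 * NB: print_cmd() above still prints cmd->session_num, but this change
 * removes the session_num field from struct xlp_sec_command (see the
 * nlmseclib.h hunk further down), so a build with NLM_SEC_DEBUG defined
 * would also need that printf dropped.
 */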
printf("hashsrc :%d\n",cmd->hashsrc); printf("hmacpad :%d\n",cmd->hmacpad); printf("hashoff :%d\n",cmd->hashoff); printf("hashlen :%d\n",cmd->hashlen); printf("cipheroff :%d\n",cmd->cipheroff); printf("cipherlen :%d\n",cmd->cipherlen); printf("ivoff :%d\n",cmd->ivoff); printf("ivlen :%d\n",cmd->ivlen); printf("hashalg :%d\n",cmd->hashalg); printf("hashmode :%d\n",cmd->hashmode); printf("cipheralg :%d\n",cmd->cipheralg); printf("ciphermode :%d\n",cmd->ciphermode); printf("nsegs :%d\n",cmd->nsegs); printf("hash_dst_len :%d\n",cmd->hash_dst_len); } #endif /* NLM_SEC_DEBUG */ static int xlp_sec_init(struct xlp_sec_softc *sc) { /* Register interrupt handler for the SEC CMS messages */ if (register_msgring_handler(sc->sec_vc_start, sc->sec_vc_end, nlm_xlpsec_msgring_handler, sc) != 0) { printf("Couldn't register sec msgring handler\n"); return (-1); } /* Do the CMS credit initialization */ /* Currently it is configured by default to 50 when kernel comes up */ return (0); } /* This function is called from an interrupt handler */ void nlm_xlpsec_msgring_handler(int vc, int size, int code, int src_id, struct nlm_fmn_msg *msg, void *data) { struct xlp_sec_command *cmd = NULL; struct xlp_sec_softc *sc = NULL; struct cryptodesc *crd = NULL; unsigned int ivlen = 0; KASSERT(code == FMN_SWCODE_CRYPTO, ("%s: bad code = %d, expected code = %d\n", __FUNCTION__, code, FMN_SWCODE_CRYPTO)); sc = (struct xlp_sec_softc *)data; KASSERT(src_id >= sc->sec_vc_start && src_id <= sc->sec_vc_end, ("%s: bad src_id = %d, expect %d - %d\n", __FUNCTION__, src_id, sc->sec_vc_start, sc->sec_vc_end)); cmd = (struct xlp_sec_command *)(uintptr_t)msg->msg[0]; KASSERT(cmd != NULL && cmd->crp != NULL, ("%s :cmd not received properly\n",__FUNCTION__)); KASSERT(CRYPTO_ERROR(msg->msg[1]) == 0, ("%s: Message rcv msg0 %llx msg1 %llx err %x \n", __FUNCTION__, (unsigned long long)msg->msg[0], (unsigned long long)msg->msg[1], (int)CRYPTO_ERROR(msg->msg[1]))); crd = cmd->enccrd; /* Copy the last 8 or 16 bytes to the session iv, so that in few * cases this will be used as IV for the next request */ if (crd != NULL) { if ((crd->crd_alg == CRYPTO_DES_CBC || crd->crd_alg == CRYPTO_3DES_CBC || crd->crd_alg == CRYPTO_AES_CBC) && (crd->crd_flags & CRD_F_ENCRYPT)) { ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ? XLP_SEC_AES_IV_LENGTH : XLP_SEC_DES_IV_LENGTH); crypto_copydata(cmd->crp->crp_flags, cmd->crp->crp_buf, crd->crd_skip + crd->crd_len - ivlen, ivlen, - sc->sc_sessions[cmd->session_num].ses_iv); + cmd->ses->ses_iv); } } /* If there are not enough credits to send, then send request * will fail with ERESTART and the driver will be blocked until it is * unblocked here after knowing that there are sufficient credits to * send the request again. */ if (sc->sc_needwakeup) { atomic_add_int(&creditleft, sc->sec_msgsz); if (creditleft >= (NLM_CRYPTO_LEFT_REQS)) { crypto_unblock(sc->sc_cid, sc->sc_needwakeup); sc->sc_needwakeup &= (~(CRYPTO_SYMQ | CRYPTO_ASYMQ)); } } if(cmd->maccrd) { crypto_copyback(cmd->crp->crp_flags, cmd->crp->crp_buf, cmd->maccrd->crd_inject, cmd->hash_dst_len, cmd->hashdest); } /* This indicates completion of the crypto operation */ crypto_done(cmd->crp); xlp_free_cmd_params(cmd); return; } static int xlp_sec_probe(device_t dev) { struct xlp_sec_softc *sc; if (pci_get_vendor(dev) == PCI_VENDOR_NETLOGIC && pci_get_device(dev) == PCI_DEVICE_ID_NLM_SAE) { sc = device_get_softc(dev); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Attach an interface that successfully probed. 
*/ static int xlp_sec_attach(device_t dev) { struct xlp_sec_softc *sc = device_get_softc(dev); uint64_t base; int qstart, qnum; int freq, node; sc->sc_dev = dev; node = nlm_get_device_node(pci_get_slot(dev)); freq = nlm_set_device_frequency(node, DFS_DEVICE_SAE, 250); if (bootverbose) device_printf(dev, "SAE Freq: %dMHz\n", freq); if(pci_get_device(dev) == PCI_DEVICE_ID_NLM_SAE) { device_set_desc(dev, "XLP Security Accelerator"); - sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); + sc->sc_cid = crypto_get_driverid(dev, + sizeof(struct xlp_sec_session), CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { printf("xlp_sec - error : could not get the driver" " id\n"); goto error_exit; } if (crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0) != 0) printf("register failed for CRYPTO_DES_CBC\n"); if (crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0) != 0) printf("register failed for CRYPTO_3DES_CBC\n"); if (crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0) != 0) printf("register failed for CRYPTO_AES_CBC\n"); if (crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0) != 0) printf("register failed for CRYPTO_ARC4\n"); if (crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0) != 0) printf("register failed for CRYPTO_MD5\n"); if (crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0) != 0) printf("register failed for CRYPTO_SHA1\n"); if (crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0) != 0) printf("register failed for CRYPTO_MD5_HMAC\n"); if (crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0) != 0) printf("register failed for CRYPTO_SHA1_HMAC\n"); base = nlm_get_sec_pcibase(node); qstart = nlm_qidstart(base); qnum = nlm_qnum(base); sc->sec_vc_start = qstart; sc->sec_vc_end = qstart + qnum - 1; } if (xlp_sec_init(sc) != 0) goto error_exit; if (bootverbose) device_printf(dev, "SEC Initialization complete!\n"); return (0); error_exit: return (ENXIO); } /* * Detach an interface that successfully probed. */ static int xlp_sec_detach(device_t dev) { return (0); } -/* - * Allocate a new 'session' and return an encoded session id. 'sidp' - * contains our registration id, and should contain an encoded session - * id on successful allocation. 
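In the converted xlp_sec_newsession() below, all of the code that grew and scanned the softc session array disappears; the driver simply asks for its pre-allocated state with crypto_get_driver_session() and fills it in (the storage arrives zeroed, which is why the old bzero() could go away too). A sketch of the same shape for the hypothetical mydrv driver from the first example; the single-cipher policy and key handling are illustrative, not taken from this patch.

static int
mydrv_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
{
	struct mydrv_session *ses;
	struct cryptoini *c;

	if (cri == NULL)
		return (EINVAL);

	/* Storage sized at crypto_get_driverid() time, provided zeroed. */
	ses = crypto_get_driver_session(cses);

	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_AES_CBC:
			if (ses->alg != 0)
				return (EINVAL);	/* one cipher per session */
			ses->alg = c->cri_alg;
			if (c->cri_key != NULL)
				memcpy(ses->key, c->cri_key,
				    MIN(c->cri_klen / 8, sizeof(ses->key)));
			break;
		default:
			return (EINVAL);
		}
	}
	return (ses->alg != 0 ? 0 : EINVAL);
}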
- */ static int -xlp_sec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri) +xlp_sec_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri) { struct cryptoini *c; struct xlp_sec_softc *sc = device_get_softc(dev); - int mac = 0, cry = 0, sesn; - struct xlp_sec_session *ses = NULL; + int mac = 0, cry = 0; + struct xlp_sec_session *ses; struct xlp_sec_command *cmd = NULL; - if (sidp == NULL || cri == NULL || sc == NULL) + if (cri == NULL || sc == NULL) return (EINVAL); - if (sc->sc_sessions == NULL) { - ses = sc->sc_sessions = malloc(sizeof(struct xlp_sec_session), - M_DEVBUF, M_NOWAIT); - if (ses == NULL) - return (ENOMEM); - sesn = 0; - sc->sc_nsessions = 1; - } else { - for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { - if (!sc->sc_sessions[sesn].hs_used) { - ses = &sc->sc_sessions[sesn]; - break; - } - } - - if (ses == NULL) { - sesn = sc->sc_nsessions; - ses = malloc((sesn + 1)*sizeof(struct xlp_sec_session), - M_DEVBUF, M_NOWAIT); - if (ses == NULL) - return (ENOMEM); - bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses)); - bzero(sc->sc_sessions, sesn * sizeof(*ses)); - free(sc->sc_sessions, M_DEVBUF); - sc->sc_sessions = ses; - ses = &sc->sc_sessions[sesn]; - sc->sc_nsessions++; - } - } - bzero(ses, sizeof(*ses)); - ses->sessionid = sesn; + ses = crypto_get_driver_session(cses); cmd = &ses->cmd; - ses->hs_used = 1; for (c = cri; c != NULL; c = c->cri_next) { switch (c->cri_alg) { case CRYPTO_MD5: case CRYPTO_SHA1: case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: if (mac) return (EINVAL); mac = 1; ses->hs_mlen = c->cri_mlen; if (ses->hs_mlen == 0) { switch (c->cri_alg) { case CRYPTO_MD5: case CRYPTO_MD5_HMAC: ses->hs_mlen = 16; break; case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: ses->hs_mlen = 20; break; } } break; case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: case CRYPTO_AES_CBC: /* XXX this may read fewer, does it matter? */ read_random(ses->ses_iv, c->cri_alg == CRYPTO_AES_CBC ? XLP_SEC_AES_IV_LENGTH : XLP_SEC_DES_IV_LENGTH); /* FALLTHROUGH */ case CRYPTO_ARC4: if (cry) return (EINVAL); cry = 1; break; default: return (EINVAL); } } if (mac == 0 && cry == 0) return (EINVAL); cmd->hash_dst_len = ses->hs_mlen; - *sidp = XLP_SEC_SID(device_get_unit(sc->sc_dev), sesn); return (0); } /* - * Deallocate a session. - * XXX this routine should run a zero'd mac/encrypt key into context ram. - * XXX to blow away any keys already stored there. + * XXX freesession routine should run a zero'd mac/encrypt key into context + * ram. to blow away any keys already stored there. */ -static int -xlp_sec_freesession(device_t dev, u_int64_t tid) -{ - struct xlp_sec_softc *sc = device_get_softc(dev); - int session; - u_int32_t sid = CRYPTO_SESID2LID(tid); - if (sc == NULL) - return (EINVAL); - - session = XLP_SEC_SESSION(sid); - if (session >= sc->sc_nsessions) - return (EINVAL); - - sc->sc_sessions[session].hs_used = 0; - return (0); -} - static int xlp_copyiv(struct xlp_sec_softc *sc, struct xlp_sec_command *cmd, struct cryptodesc *enccrd) { unsigned int ivlen = 0; - int session; struct cryptop *crp = NULL; crp = cmd->crp; - session = cmd->session_num; if (enccrd->crd_alg != CRYPTO_ARC4) { ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ? 
XLP_SEC_AES_IV_LENGTH : XLP_SEC_DES_IV_LENGTH); if (enccrd->crd_flags & CRD_F_ENCRYPT) { if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) { bcopy(enccrd->crd_iv, cmd->iv, ivlen); } else { - bcopy(sc->sc_sessions[session].ses_iv, cmd->iv, - ivlen); + bcopy(cmd->ses->ses_iv, cmd->iv, ivlen); } if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivlen, cmd->iv); } } else { if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) { bcopy(enccrd->crd_iv, cmd->iv, ivlen); } else { crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_inject, ivlen, cmd->iv); } } } return (0); } static int xlp_get_nsegs(struct cryptop *crp, unsigned int *nsegs) { if (crp->crp_flags & CRYPTO_F_IMBUF) { struct mbuf *m = NULL; m = (struct mbuf *)crp->crp_buf; while (m != NULL) { *nsegs += NLM_CRYPTO_NUM_SEGS_REQD(m->m_len); m = m->m_next; } } else if (crp->crp_flags & CRYPTO_F_IOV) { struct uio *uio = NULL; struct iovec *iov = NULL; int iol = 0; uio = (struct uio *)crp->crp_buf; iov = (struct iovec *)uio->uio_iov; iol = uio->uio_iovcnt; while (iol > 0) { *nsegs += NLM_CRYPTO_NUM_SEGS_REQD(iov->iov_len); iol--; iov++; } } else { *nsegs = NLM_CRYPTO_NUM_SEGS_REQD(crp->crp_ilen); } return (0); } static int xlp_alloc_cmd_params(struct xlp_sec_command *cmd, unsigned int nsegs) { int err = 0; if(cmd == NULL) { err = EINVAL; goto error; } if ((cmd->ctrlp = malloc(sizeof(struct nlm_crypto_pkt_ctrl), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { err = ENOMEM; goto error; } if (((uintptr_t)cmd->ctrlp & (XLP_L2L3_CACHELINE_SIZE - 1))) { err = EINVAL; goto error; } /* (nsegs - 1) because one seg is part of the structure already */ if ((cmd->paramp = malloc(sizeof(struct nlm_crypto_pkt_param) + (16 * (nsegs - 1)), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { err = ENOMEM; goto error; } if (((uintptr_t)cmd->paramp & (XLP_L2L3_CACHELINE_SIZE - 1))) { err = EINVAL; goto error; } if ((cmd->iv = malloc(EALG_MAX_BLOCK_LEN, M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { err = ENOMEM; goto error; } if ((cmd->hashdest = malloc(HASH_MAX_LEN, M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { err = ENOMEM; goto error; } error: return (err); } static void xlp_free_cmd_params(struct xlp_sec_command *cmd) { if (cmd->ctrlp != NULL) free(cmd->ctrlp, M_DEVBUF); if (cmd->paramp != NULL) free(cmd->paramp, M_DEVBUF); if (cmd->iv != NULL) free(cmd->iv, M_DEVBUF); if (cmd->hashdest != NULL) free(cmd->hashdest, M_DEVBUF); if (cmd != NULL) free(cmd, M_DEVBUF); return; } static int xlp_sec_process(device_t dev, struct cryptop *crp, int hint) { struct xlp_sec_softc *sc = device_get_softc(dev); struct xlp_sec_command *cmd = NULL; - int session, err = -1, ret = 0; + int err = -1, ret = 0; struct cryptodesc *crd1, *crd2; struct xlp_sec_session *ses; unsigned int nsegs = 0; if (crp == NULL || crp->crp_callback == NULL) { return (EINVAL); } - session = XLP_SEC_SESSION(crp->crp_sid); - if (sc == NULL || session >= sc->sc_nsessions) { + if (sc == NULL) { err = EINVAL; goto errout; } - ses = &sc->sc_sessions[session]; + ses = crypto_get_driver_session(crp->crp_session); if ((cmd = malloc(sizeof(struct xlp_sec_command), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { err = ENOMEM; goto errout; } cmd->crp = crp; - cmd->session_num = session; + cmd->ses = ses; cmd->hash_dst_len = ses->hs_mlen; if ((crd1 = crp->crp_desc) == NULL) { err = EINVAL; goto errout; } crd2 = crd1->crd_next; if ((ret = xlp_get_nsegs(crp, &nsegs)) != 0) { err = EINVAL; goto errout; } if (((crd1 != NULL) && (crd1->crd_flags & CRD_F_IV_EXPLICIT)) || ((crd2 != NULL) && 
(crd2->crd_flags & CRD_F_IV_EXPLICIT))) { /* Since IV is given as separate segment to avoid copy */ nsegs += 1; } cmd->nsegs = nsegs; if ((err = xlp_alloc_cmd_params(cmd, nsegs)) != 0) goto errout; if ((crd1 != NULL) && (crd2 == NULL)) { if (crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC || crd1->crd_alg == CRYPTO_ARC4) { cmd->enccrd = crd1; cmd->maccrd = NULL; if ((ret = nlm_get_cipher_param(cmd)) != 0) { err = EINVAL; goto errout; } if (crd1->crd_flags & CRD_F_IV_EXPLICIT) cmd->cipheroff = cmd->ivlen; else cmd->cipheroff = cmd->enccrd->crd_skip; cmd->cipherlen = cmd->enccrd->crd_len; if (crd1->crd_flags & CRD_F_IV_PRESENT) cmd->ivoff = 0; else cmd->ivoff = cmd->enccrd->crd_inject; if ((err = xlp_copyiv(sc, cmd, cmd->enccrd)) != 0) goto errout; if ((err = nlm_crypto_do_cipher(sc, cmd)) != 0) goto errout; } else if (crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_SHA1 || crd1->crd_alg == CRYPTO_MD5) { cmd->enccrd = NULL; cmd->maccrd = crd1; if ((ret = nlm_get_digest_param(cmd)) != 0) { err = EINVAL; goto errout; } cmd->hashoff = cmd->maccrd->crd_skip; cmd->hashlen = cmd->maccrd->crd_len; cmd->hmacpad = 0; cmd->hashsrc = 0; if ((err = nlm_crypto_do_digest(sc, cmd)) != 0) goto errout; } else { err = EINVAL; goto errout; } } else if( (crd1 != NULL) && (crd2 != NULL) ) { if ((crd1->crd_alg == CRYPTO_MD5_HMAC || crd1->crd_alg == CRYPTO_SHA1_HMAC || crd1->crd_alg == CRYPTO_MD5 || crd1->crd_alg == CRYPTO_SHA1) && (crd2->crd_alg == CRYPTO_DES_CBC || crd2->crd_alg == CRYPTO_3DES_CBC || crd2->crd_alg == CRYPTO_AES_CBC || crd2->crd_alg == CRYPTO_ARC4)) { cmd->maccrd = crd1; cmd->enccrd = crd2; } else if ((crd1->crd_alg == CRYPTO_DES_CBC || crd1->crd_alg == CRYPTO_ARC4 || crd1->crd_alg == CRYPTO_3DES_CBC || crd1->crd_alg == CRYPTO_AES_CBC) && (crd2->crd_alg == CRYPTO_MD5_HMAC || crd2->crd_alg == CRYPTO_SHA1_HMAC || crd2->crd_alg == CRYPTO_MD5 || crd2->crd_alg == CRYPTO_SHA1)) { cmd->enccrd = crd1; cmd->maccrd = crd2; } else { err = EINVAL; goto errout; } if ((ret = nlm_get_cipher_param(cmd)) != 0) { err = EINVAL; goto errout; } if ((ret = nlm_get_digest_param(cmd)) != 0) { err = EINVAL; goto errout; } cmd->ivoff = cmd->enccrd->crd_inject; cmd->hashoff = cmd->maccrd->crd_skip; cmd->hashlen = cmd->maccrd->crd_len; cmd->hmacpad = 0; if (cmd->enccrd->crd_flags & CRD_F_ENCRYPT) cmd->hashsrc = 1; else cmd->hashsrc = 0; cmd->cipheroff = cmd->enccrd->crd_skip; cmd->cipherlen = cmd->enccrd->crd_len; if ((err = xlp_copyiv(sc, cmd, cmd->enccrd)) != 0) goto errout; if ((err = nlm_crypto_do_cipher_digest(sc, cmd)) != 0) goto errout; } else { err = EINVAL; goto errout; } return (0); errout: xlp_free_cmd_params(cmd); if (err == ERESTART) { sc->sc_needwakeup |= CRYPTO_SYMQ; creditleft = 0; return (err); } crp->crp_etype = err; crypto_done(crp); return (err); } Index: head/sys/mips/nlm/dev/sec/nlmseclib.h =================================================================== --- head/sys/mips/nlm/dev/sec/nlmseclib.h (revision 336438) +++ head/sys/mips/nlm/dev/sec/nlmseclib.h (revision 336439) @@ -1,159 +1,151 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2012 Broadcom Corporation * All Rights Reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _NLMSECLIB_H_ #define _NLMSECLIB_H_ /* * Cryptographic parameter definitions */ #define XLP_SEC_DES_KEY_LENGTH 8 /* Bytes */ #define XLP_SEC_3DES_KEY_LENGTH 24 /* Bytes */ #define XLP_SEC_AES128_KEY_LENGTH 16 /* Bytes */ #define XLP_SEC_AES192_KEY_LENGTH 24 /* Bytes */ #define XLP_SEC_AES256_KEY_LENGTH 32 /* Bytes */ #define XLP_SEC_AES128F8_KEY_LENGTH 32 /* Bytes */ #define XLP_SEC_AES192F8_KEY_LENGTH 48 /* Bytes */ #define XLP_SEC_AES256F8_KEY_LENGTH 64 /* Bytes */ #define XLP_SEC_KASUMI_F8_KEY_LENGTH 16 /* Bytes */ #define XLP_SEC_MAX_CRYPT_KEY_LENGTH XLP_SEC_AES256F8_KEY_LENGTH #define XLP_SEC_DES_IV_LENGTH 8 /* Bytes */ #define XLP_SEC_AES_IV_LENGTH 16 /* Bytes */ #define XLP_SEC_ARC4_IV_LENGTH 0 /* Bytes */ #define XLP_SEC_KASUMI_F8_IV_LENGTH 16 /* Bytes */ #define XLP_SEC_MAX_IV_LENGTH 16 /* Bytes */ #define XLP_SEC_IV_LENGTH_BYTES 8 /* Bytes */ #define XLP_SEC_AES_BLOCK_SIZE 16 /* Bytes */ #define XLP_SEC_DES_BLOCK_SIZE 8 /* Bytes */ #define XLP_SEC_3DES_BLOCK_SIZE 8 /* Bytes */ #define XLP_SEC_MD5_BLOCK_SIZE 64 /* Bytes */ #define XLP_SEC_SHA1_BLOCK_SIZE 64 /* Bytes */ #define XLP_SEC_SHA256_BLOCK_SIZE 64 /* Bytes */ #define XLP_SEC_SHA384_BLOCK_SIZE 128 /* Bytes */ #define XLP_SEC_SHA512_BLOCK_SIZE 128 /* Bytes */ #define XLP_SEC_GCM_BLOCK_SIZE 16 /* XXX: Bytes */ #define XLP_SEC_KASUMI_F9_BLOCK_SIZE 16 /* XXX: Bytes */ #define XLP_SEC_MAX_BLOCK_SIZE 64 /* Max of MD5/SHA */ #define XLP_SEC_MD5_LENGTH 16 /* Bytes */ #define XLP_SEC_SHA1_LENGTH 20 /* Bytes */ #define XLP_SEC_SHA256_LENGTH 32 /* Bytes */ #define XLP_SEC_SHA384_LENGTH 64 /* Bytes */ #define XLP_SEC_SHA512_LENGTH 64 /* Bytes */ #define XLP_SEC_GCM_LENGTH 16 /* Bytes */ #define XLP_SEC_KASUMI_F9_LENGTH 16 /* Bytes */ #define XLP_SEC_KASUMI_F9_RESULT_LENGTH 4 /* Bytes */ #define XLP_SEC_HMAC_LENGTH 64 /* Max of MD5/SHA/SHA256 */ #define XLP_SEC_MAX_AUTH_KEY_LENGTH XLP_SEC_SHA512_BLOCK_SIZE #define XLP_SEC_MAX_RC4_STATE_SIZE 264 /* char s[256], int i, int j */ -#define XLP_SEC_SESSION(sid) ((sid) & 0x000007ff) -#define XLP_SEC_SID(crd,ses) (((crd) << 28) | ((ses) & 0x7ff)) - #define CRYPTO_ERROR(msg1) ((unsigned int)msg1) #define NLM_CRYPTO_LEFT_REQS (CMS_DEFAULT_CREDIT/2) #define NLM_CRYPTO_NUM_SEGS_REQD(__bufsize) \ ((__bufsize + NLM_CRYPTO_MAX_SEG_LEN - 1) / NLM_CRYPTO_MAX_SEG_LEN) #define NLM_CRYPTO_PKT_DESC_SIZE(nsegs) (32 + (nsegs * 16)) extern unsigned int creditleft; struct xlp_sec_command { - uint16_t session_num; struct cryptop *crp; struct cryptodesc *enccrd, *maccrd; struct xlp_sec_session *ses; struct nlm_crypto_pkt_ctrl *ctrlp; struct 
nlm_crypto_pkt_param *paramp; void *iv; uint8_t des3key[24]; uint8_t *hashdest; uint8_t hashsrc; uint8_t hmacpad; uint32_t hashoff; uint32_t hashlen; uint32_t cipheroff; uint32_t cipherlen; uint32_t ivoff; uint32_t ivlen; uint32_t hashalg; uint32_t hashmode; uint32_t cipheralg; uint32_t ciphermode; uint32_t nsegs; uint32_t hash_dst_len; /* used to store hash alg dst size */ }; struct xlp_sec_session { - uint32_t sessionid; - int hs_used; int hs_mlen; uint8_t ses_iv[EALG_MAX_BLOCK_LEN]; struct xlp_sec_command cmd; }; /* * Holds data specific to nlm security accelerators */ struct xlp_sec_softc { device_t sc_dev; /* device backpointer */ uint64_t sec_base; int32_t sc_cid; - struct xlp_sec_session *sc_sessions; - int sc_nsessions; int sc_needwakeup; uint32_t sec_vc_start; uint32_t sec_vc_end; uint32_t sec_msgsz; }; #ifdef NLM_SEC_DEBUG void print_crypto_params(struct xlp_sec_command *cmd, struct nlm_fmn_msg m); void xlp_sec_print_data(struct cryptop *crp); void print_cmd(struct xlp_sec_command *cmd); #endif int nlm_crypto_form_srcdst_segs(struct xlp_sec_command *cmd); int nlm_crypto_do_cipher(struct xlp_sec_softc *sc, struct xlp_sec_command *cmd); int nlm_crypto_do_digest(struct xlp_sec_softc *sc, struct xlp_sec_command *cmd); int nlm_crypto_do_cipher_digest(struct xlp_sec_softc *sc, struct xlp_sec_command *cmd); int nlm_get_digest_param(struct xlp_sec_command *cmd); int nlm_get_cipher_param(struct xlp_sec_command *cmd); #endif /* _NLMSECLIB_H_ */ Index: head/sys/netipsec/ipsec.c =================================================================== --- head/sys/netipsec/ipsec.c (revision 336438) +++ head/sys/netipsec/ipsec.c (revision 336439) @@ -1,1405 +1,1405 @@ /* $FreeBSD$ */ /* $KAME: ipsec.c,v 1.103 2001/05/24 07:14:18 sakane Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * IPsec controller part. 
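The header changes above remove the last traces of driver-side session bookkeeping: the XLP_*_SID packing macros, the sessionid/hs_used fields and the sc_sessions arrays all become unnecessary once the framework hands out opaque handles. On the consumer side, of which the IPsec code that follows is one example, the visible difference is simply that a crypto_session_t is stored instead of a 64-bit SID. A rough, hedged sketch of the consumer lifecycle under the new API; the my_done callback, the contiguous-buffer assumption and the omitted IV handling are simplifications, and the headers from the first sketch are assumed.

static int
my_done(struct cryptop *crp)
{
	/*
	 * Check crp->crp_etype; a real consumer would crypto_freereq(crp)
	 * here and call crypto_freesession() when the session is retired.
	 */
	return (0);
}

static int
encrypt_buf(char *buf, int len, uint8_t *key, int keybits)
{
	crypto_session_t cses;
	struct cryptoini cri;
	struct cryptop *crp;
	int error;

	memset(&cri, 0, sizeof(cri));
	cri.cri_alg = CRYPTO_AES_CBC;
	cri.cri_klen = keybits;			/* key length in bits */
	cri.cri_key = (caddr_t)key;

	/* The third argument selects hardware and/or software drivers. */
	error = crypto_newsession(&cses, &cri,
	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
	if (error != 0)
		return (error);

	crp = crypto_getreq(1);			/* one descriptor */
	if (crp == NULL) {
		crypto_freesession(cses);
		return (ENOMEM);
	}
	crp->crp_session = cses;		/* opaque handle, no SID */
	crp->crp_ilen = len;
	crp->crp_buf = buf;			/* flat kernel buffer */
	crp->crp_callback = my_done;
	crp->crp_desc->crd_alg = CRYPTO_AES_CBC;
	crp->crp_desc->crd_skip = 0;
	crp->crp_desc->crd_len = len;
	crp->crp_desc->crd_flags = CRD_F_ENCRYPT;

	return (crypto_dispatch(crp));		/* completion lands in my_done() */
}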
*/ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipsec.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #endif #include #ifdef INET6 #include #endif #include #include #ifdef INET6 #include #endif #include #include #include /*XXX*/ #include #include #include #include #include #include #include #include /* NB: name changed so netstat doesn't use it. */ VNET_PCPUSTAT_DEFINE(struct ipsecstat, ipsec4stat); VNET_PCPUSTAT_SYSINIT(ipsec4stat); #ifdef VIMAGE VNET_PCPUSTAT_SYSUNINIT(ipsec4stat); #endif /* VIMAGE */ /* DF bit on encap. 0: clear 1: set 2: copy */ VNET_DEFINE(int, ip4_ipsec_dfbit) = 0; VNET_DEFINE(int, ip4_esp_trans_deflev) = IPSEC_LEVEL_USE; VNET_DEFINE(int, ip4_esp_net_deflev) = IPSEC_LEVEL_USE; VNET_DEFINE(int, ip4_ah_trans_deflev) = IPSEC_LEVEL_USE; VNET_DEFINE(int, ip4_ah_net_deflev) = IPSEC_LEVEL_USE; /* ECN ignore(-1)/forbidden(0)/allowed(1) */ VNET_DEFINE(int, ip4_ipsec_ecn) = 0; static VNET_DEFINE(int, ip4_filtertunnel) = 0; #define V_ip4_filtertunnel VNET(ip4_filtertunnel) static VNET_DEFINE(int, check_policy_history) = 0; #define V_check_policy_history VNET(check_policy_history) static VNET_DEFINE(struct secpolicy *, def_policy) = NULL; #define V_def_policy VNET(def_policy) static int sysctl_def_policy(SYSCTL_HANDLER_ARGS) { int error, value; value = V_def_policy->policy; error = sysctl_handle_int(oidp, &value, 0, req); if (error == 0) { if (value != IPSEC_POLICY_DISCARD && value != IPSEC_POLICY_NONE) return (EINVAL); V_def_policy->policy = value; } return (error); } /* * Crypto support requirements: * * 1 require hardware support * -1 require software support * 0 take anything */ VNET_DEFINE(int, crypto_support) = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE; /* * Use asynchronous mode to parallelize crypto jobs: * * 0 - disabled * 1 - enabled */ VNET_DEFINE(int, async_crypto) = 0; /* * TCP/UDP checksum handling policy for transport mode NAT-T (RFC3948) * * 0 - auto: incrementally recompute, when checksum delta is known; * if checksum delta isn't known, reset checksum to zero for UDP, * and mark csum_flags as valid for TCP. * 1 - fully recompute TCP/UDP checksum. 
*/ VNET_DEFINE(int, natt_cksum_policy) = 0; FEATURE(ipsec, "Internet Protocol Security (IPsec)"); FEATURE(ipsec_natt, "UDP Encapsulation of IPsec ESP Packets ('NAT-T')"); SYSCTL_DECL(_net_inet_ipsec); /* net.inet.ipsec */ SYSCTL_PROC(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT | CTLFLAG_VNET | CTLFLAG_RW, 0, 0, sysctl_def_policy, "I", "IPsec default policy."); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_esp_trans_deflev), 0, "Default ESP transport mode level"); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_esp_net_deflev), 0, "Default ESP tunnel mode level."); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_ah_trans_deflev), 0, "AH transfer mode default level."); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_ah_net_deflev), 0, "AH tunnel mode default level."); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS, ah_cleartos, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ah_cleartos), 0, "If set, clear type-of-service field when doing AH computation."); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DFBIT, dfbit, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_ipsec_dfbit), 0, "Do not fragment bit on encap."); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ECN, ecn, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_ipsec_ecn), 0, "Explicit Congestion Notification handling."); SYSCTL_INT(_net_inet_ipsec, OID_AUTO, crypto_support, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(crypto_support), 0, "Crypto driver selection."); SYSCTL_INT(_net_inet_ipsec, OID_AUTO, async_crypto, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(async_crypto), 0, "Use asynchronous mode to parallelize crypto jobs."); SYSCTL_INT(_net_inet_ipsec, OID_AUTO, check_policy_history, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(check_policy_history), 0, "Use strict check of inbound packets to security policy compliance."); SYSCTL_INT(_net_inet_ipsec, OID_AUTO, natt_cksum_policy, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(natt_cksum_policy), 0, "Method to fix TCP/UDP checksum for transport mode IPsec after NAT."); SYSCTL_INT(_net_inet_ipsec, OID_AUTO, filtertunnel, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip4_filtertunnel), 0, "If set, filter packets from an IPsec tunnel."); SYSCTL_VNET_PCPUSTAT(_net_inet_ipsec, OID_AUTO, ipsecstats, struct ipsecstat, ipsec4stat, "IPsec IPv4 statistics."); #ifdef REGRESSION /* * When set to 1, IPsec will send packets with the same sequence number. * This allows to verify if the other side has proper replay attacks detection. */ VNET_DEFINE(int, ipsec_replay) = 0; SYSCTL_INT(_net_inet_ipsec, OID_AUTO, test_replay, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ipsec_replay), 0, "Emulate replay attack"); /* * When set 1, IPsec will send packets with corrupted HMAC. * This allows to verify if the other side properly detects modified packets. 
*/ VNET_DEFINE(int, ipsec_integrity) = 0; SYSCTL_INT(_net_inet_ipsec, OID_AUTO, test_integrity, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ipsec_integrity), 0, "Emulate man-in-the-middle attack"); #endif #ifdef INET6 VNET_PCPUSTAT_DEFINE(struct ipsecstat, ipsec6stat); VNET_PCPUSTAT_SYSINIT(ipsec6stat); #ifdef VIMAGE VNET_PCPUSTAT_SYSUNINIT(ipsec6stat); #endif /* VIMAGE */ VNET_DEFINE(int, ip6_esp_trans_deflev) = IPSEC_LEVEL_USE; VNET_DEFINE(int, ip6_esp_net_deflev) = IPSEC_LEVEL_USE; VNET_DEFINE(int, ip6_ah_trans_deflev) = IPSEC_LEVEL_USE; VNET_DEFINE(int, ip6_ah_net_deflev) = IPSEC_LEVEL_USE; VNET_DEFINE(int, ip6_ipsec_ecn) = 0; /* ECN ignore(-1)/forbidden(0)/allowed(1) */ static VNET_DEFINE(int, ip6_filtertunnel) = 0; #define V_ip6_filtertunnel VNET(ip6_filtertunnel) SYSCTL_DECL(_net_inet6_ipsec6); /* net.inet6.ipsec6 */ SYSCTL_PROC(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT | CTLFLAG_VNET | CTLFLAG_RW, 0, 0, sysctl_def_policy, "I", "IPsec default policy."); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_esp_trans_deflev), 0, "Default ESP transport mode level."); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_esp_net_deflev), 0, "Default ESP tunnel mode level."); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_ah_trans_deflev), 0, "AH transfer mode default level."); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_ah_net_deflev), 0, "AH tunnel mode default level."); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ECN, ecn, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_ipsec_ecn), 0, "Explicit Congestion Notification handling."); SYSCTL_INT(_net_inet6_ipsec6, OID_AUTO, filtertunnel, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_filtertunnel), 0, "If set, filter packets from an IPsec tunnel."); SYSCTL_VNET_PCPUSTAT(_net_inet6_ipsec6, IPSECCTL_STATS, ipsecstats, struct ipsecstat, ipsec6stat, "IPsec IPv6 statistics."); #endif /* INET6 */ static int ipsec_in_reject(struct secpolicy *, struct inpcb *, const struct mbuf *); #ifdef INET static void ipsec4_get_ulp(const struct mbuf *, struct secpolicyindex *, int); static void ipsec4_setspidx_ipaddr(const struct mbuf *, struct secpolicyindex *); #endif #ifdef INET6 static void ipsec6_get_ulp(const struct mbuf *m, struct secpolicyindex *, int); static void ipsec6_setspidx_ipaddr(const struct mbuf *, struct secpolicyindex *); #endif /* * Return a held reference to the default SP. */ static struct secpolicy * key_allocsp_default(void) { key_addref(V_def_policy); return (V_def_policy); } static void ipsec_invalidate_cache(struct inpcb *inp, u_int dir) { struct secpolicy *sp; INP_WLOCK_ASSERT(inp); if (dir == IPSEC_DIR_OUTBOUND) { if (inp->inp_sp->flags & INP_INBOUND_POLICY) return; sp = inp->inp_sp->sp_in; inp->inp_sp->sp_in = NULL; } else { if (inp->inp_sp->flags & INP_OUTBOUND_POLICY) return; sp = inp->inp_sp->sp_out; inp->inp_sp->sp_out = NULL; } if (sp != NULL) key_freesp(&sp); /* release extra reference */ } static void ipsec_cachepolicy(struct inpcb *inp, struct secpolicy *sp, u_int dir) { uint32_t genid; int downgrade; INP_LOCK_ASSERT(inp); if (dir == IPSEC_DIR_OUTBOUND) { /* Do we have configured PCB policy? 
*/ if (inp->inp_sp->flags & INP_OUTBOUND_POLICY) return; /* Another thread has already set cached policy */ if (inp->inp_sp->sp_out != NULL) return; /* * Do not cache OUTBOUND policy if PCB isn't connected, * i.e. foreign address is INADDR_ANY/UNSPECIFIED. */ #ifdef INET if ((inp->inp_vflag & INP_IPV4) != 0 && inp->inp_faddr.s_addr == INADDR_ANY) return; #endif #ifdef INET6 if ((inp->inp_vflag & INP_IPV6) != 0 && IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) return; #endif } else { /* Do we have configured PCB policy? */ if (inp->inp_sp->flags & INP_INBOUND_POLICY) return; /* Another thread has already set cached policy */ if (inp->inp_sp->sp_in != NULL) return; /* * Do not cache INBOUND policy for listen socket, * that is bound to INADDR_ANY/UNSPECIFIED address. */ #ifdef INET if ((inp->inp_vflag & INP_IPV4) != 0 && inp->inp_faddr.s_addr == INADDR_ANY) return; #endif #ifdef INET6 if ((inp->inp_vflag & INP_IPV6) != 0 && IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) return; #endif } downgrade = 0; if (!INP_WLOCKED(inp)) { if ((downgrade = INP_TRY_UPGRADE(inp)) == 0) return; } if (dir == IPSEC_DIR_OUTBOUND) inp->inp_sp->sp_out = sp; else inp->inp_sp->sp_in = sp; /* * SP is already referenced by the lookup code. * We take extra reference here to avoid race in the * ipsec_getpcbpolicy() function - SP will not be freed in the * time between we take SP pointer from the cache and key_addref() * call. */ key_addref(sp); genid = key_getspgen(); if (genid != inp->inp_sp->genid) { ipsec_invalidate_cache(inp, dir); inp->inp_sp->genid = genid; } KEYDBG(IPSEC_STAMP, printf("%s: PCB(%p): cached %s SP(%p)\n", __func__, inp, dir == IPSEC_DIR_OUTBOUND ? "OUTBOUND": "INBOUND", sp)); if (downgrade != 0) INP_DOWNGRADE(inp); } static struct secpolicy * ipsec_checkpolicy(struct secpolicy *sp, struct inpcb *inp, int *error) { /* Save found OUTBOUND policy into PCB SP cache. */ if (inp != NULL && inp->inp_sp != NULL && inp->inp_sp->sp_out == NULL) ipsec_cachepolicy(inp, sp, IPSEC_DIR_OUTBOUND); switch (sp->policy) { default: printf("%s: invalid policy %u\n", __func__, sp->policy); /* FALLTHROUGH */ case IPSEC_POLICY_DISCARD: *error = -EINVAL; /* Packet is discarded by caller. */ /* FALLTHROUGH */ case IPSEC_POLICY_BYPASS: case IPSEC_POLICY_NONE: key_freesp(&sp); sp = NULL; /* NB: force NULL result. */ break; case IPSEC_POLICY_IPSEC: /* XXXAE: handle LARVAL SP */ break; } KEYDBG(IPSEC_DUMP, printf("%s: get SP(%p), error %d\n", __func__, sp, *error)); return (sp); } static struct secpolicy * ipsec_getpcbpolicy(struct inpcb *inp, u_int dir) { struct secpolicy *sp; int flags, downgrade; if (inp == NULL || inp->inp_sp == NULL) return (NULL); INP_LOCK_ASSERT(inp); flags = inp->inp_sp->flags; if (dir == IPSEC_DIR_OUTBOUND) { sp = inp->inp_sp->sp_out; flags &= INP_OUTBOUND_POLICY; } else { sp = inp->inp_sp->sp_in; flags &= INP_INBOUND_POLICY; } /* * Check flags. If we have PCB SP, just return it. * Otherwise we need to check that cached SP entry isn't stale. */ if (flags == 0) { if (sp == NULL) return (NULL); if (inp->inp_sp->genid != key_getspgen()) { /* Invalidate the cache. 
*/ downgrade = 0; if (!INP_WLOCKED(inp)) { if ((downgrade = INP_TRY_UPGRADE(inp)) == 0) return (NULL); } ipsec_invalidate_cache(inp, IPSEC_DIR_OUTBOUND); ipsec_invalidate_cache(inp, IPSEC_DIR_INBOUND); if (downgrade != 0) INP_DOWNGRADE(inp); return (NULL); } KEYDBG(IPSEC_STAMP, printf("%s: PCB(%p): cache hit SP(%p)\n", __func__, inp, sp)); /* Return referenced cached policy */ } key_addref(sp); return (sp); } #ifdef INET static void ipsec4_get_ulp(const struct mbuf *m, struct secpolicyindex *spidx, int needport) { uint8_t nxt; int off; /* Sanity check. */ IPSEC_ASSERT(m->m_pkthdr.len >= sizeof(struct ip), ("packet too short")); if (m->m_len >= sizeof (struct ip)) { const struct ip *ip = mtod(m, const struct ip *); if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) goto done; off = ip->ip_hl << 2; nxt = ip->ip_p; } else { struct ip ih; m_copydata(m, 0, sizeof (struct ip), (caddr_t) &ih); if (ih.ip_off & htons(IP_MF | IP_OFFMASK)) goto done; off = ih.ip_hl << 2; nxt = ih.ip_p; } while (off < m->m_pkthdr.len) { struct ip6_ext ip6e; struct tcphdr th; struct udphdr uh; switch (nxt) { case IPPROTO_TCP: spidx->ul_proto = nxt; if (!needport) goto done_proto; if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) goto done; m_copydata(m, off, sizeof (th), (caddr_t) &th); spidx->src.sin.sin_port = th.th_sport; spidx->dst.sin.sin_port = th.th_dport; return; case IPPROTO_UDP: spidx->ul_proto = nxt; if (!needport) goto done_proto; if (off + sizeof(struct udphdr) > m->m_pkthdr.len) goto done; m_copydata(m, off, sizeof (uh), (caddr_t) &uh); spidx->src.sin.sin_port = uh.uh_sport; spidx->dst.sin.sin_port = uh.uh_dport; return; case IPPROTO_AH: if (off + sizeof(ip6e) > m->m_pkthdr.len) goto done; /* XXX Sigh, this works but is totally bogus. */ m_copydata(m, off, sizeof(ip6e), (caddr_t) &ip6e); off += (ip6e.ip6e_len + 2) << 2; nxt = ip6e.ip6e_nxt; break; case IPPROTO_ICMP: default: /* XXX Intermediate headers??? */ spidx->ul_proto = nxt; goto done_proto; } } done: spidx->ul_proto = IPSEC_ULPROTO_ANY; done_proto: spidx->src.sin.sin_port = IPSEC_PORT_ANY; spidx->dst.sin.sin_port = IPSEC_PORT_ANY; KEYDBG(IPSEC_DUMP, printf("%s: ", __func__); kdebug_secpolicyindex(spidx, NULL)); } static void ipsec4_setspidx_ipaddr(const struct mbuf *m, struct secpolicyindex *spidx) { ipsec4_setsockaddrs(m, &spidx->src, &spidx->dst); spidx->prefs = sizeof(struct in_addr) << 3; spidx->prefd = sizeof(struct in_addr) << 3; } static struct secpolicy * ipsec4_getpolicy(const struct mbuf *m, struct inpcb *inp, u_int dir, int needport) { struct secpolicyindex spidx; struct secpolicy *sp; sp = ipsec_getpcbpolicy(inp, dir); if (sp == NULL && key_havesp(dir)) { /* Make an index to look for a policy. */ ipsec4_setspidx_ipaddr(m, &spidx); ipsec4_get_ulp(m, &spidx, needport); spidx.dir = dir; sp = key_allocsp(&spidx, dir); } if (sp == NULL) /* No SP found, use system default. */ sp = key_allocsp_default(); return (sp); } /* * Check security policy for *OUTBOUND* IPv4 packet. 
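 * Returns a referenced SP when IPsec processing must be applied (the
 * caller releases it with key_freesp()), NULL with *error == 0 when no
 * IPsec processing is required (BYPASS/NONE), and NULL with *error set
 * to -EINVAL when the matching policy is DISCARD and the caller must
 * drop the packet.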
*/ struct secpolicy * ipsec4_checkpolicy(const struct mbuf *m, struct inpcb *inp, int *error, int needport) { struct secpolicy *sp; *error = 0; sp = ipsec4_getpolicy(m, inp, IPSEC_DIR_OUTBOUND, needport); if (sp != NULL) sp = ipsec_checkpolicy(sp, inp, error); if (sp == NULL) { switch (*error) { case 0: /* No IPsec required: BYPASS or NONE */ break; case -EINVAL: IPSECSTAT_INC(ips_out_polvio); break; default: IPSECSTAT_INC(ips_out_inval); } } KEYDBG(IPSEC_STAMP, printf("%s: using SP(%p), error %d\n", __func__, sp, *error)); if (sp != NULL) KEYDBG(IPSEC_DATA, kdebug_secpolicy(sp)); return (sp); } /* * Check IPv4 packet against *INBOUND* security policy. * This function is called from tcp_input(), udp_input(), * rip_input() and sctp_input(). */ int ipsec4_in_reject(const struct mbuf *m, struct inpcb *inp) { struct secpolicy *sp; int result; sp = ipsec4_getpolicy(m, inp, IPSEC_DIR_INBOUND, 0); result = ipsec_in_reject(sp, inp, m); key_freesp(&sp); if (result != 0) IPSECSTAT_INC(ips_in_polvio); return (result); } /* * IPSEC_CAP() method implementation for IPv4. */ int ipsec4_capability(struct mbuf *m, u_int cap) { switch (cap) { case IPSEC_CAP_BYPASS_FILTER: /* * Bypass packet filtering for packets previously handled * by IPsec. */ if (!V_ip4_filtertunnel && m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL) return (1); return (0); case IPSEC_CAP_OPERABLE: /* Do we have active security policies? */ if (key_havesp(IPSEC_DIR_INBOUND) != 0 || key_havesp(IPSEC_DIR_OUTBOUND) != 0) return (1); return (0); }; return (EOPNOTSUPP); } #endif /* INET */ #ifdef INET6 static void ipsec6_get_ulp(const struct mbuf *m, struct secpolicyindex *spidx, int needport) { struct tcphdr th; struct udphdr uh; struct icmp6_hdr ih; int off, nxt; IPSEC_ASSERT(m->m_pkthdr.len >= sizeof(struct ip6_hdr), ("packet too short")); /* Set default. */ spidx->ul_proto = IPSEC_ULPROTO_ANY; spidx->src.sin6.sin6_port = IPSEC_PORT_ANY; spidx->dst.sin6.sin6_port = IPSEC_PORT_ANY; nxt = -1; off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt); if (off < 0 || m->m_pkthdr.len < off) return; switch (nxt) { case IPPROTO_TCP: spidx->ul_proto = nxt; if (!needport) break; if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) break; m_copydata(m, off, sizeof(th), (caddr_t)&th); spidx->src.sin6.sin6_port = th.th_sport; spidx->dst.sin6.sin6_port = th.th_dport; break; case IPPROTO_UDP: spidx->ul_proto = nxt; if (!needport) break; if (off + sizeof(struct udphdr) > m->m_pkthdr.len) break; m_copydata(m, off, sizeof(uh), (caddr_t)&uh); spidx->src.sin6.sin6_port = uh.uh_sport; spidx->dst.sin6.sin6_port = uh.uh_dport; break; case IPPROTO_ICMPV6: spidx->ul_proto = nxt; if (off + sizeof(struct icmp6_hdr) > m->m_pkthdr.len) break; m_copydata(m, off, sizeof(ih), (caddr_t)&ih); spidx->src.sin6.sin6_port = htons((uint16_t)ih.icmp6_type); spidx->dst.sin6.sin6_port = htons((uint16_t)ih.icmp6_code); break; default: /* XXX Intermediate headers??? */ spidx->ul_proto = nxt; break; } KEYDBG(IPSEC_DUMP, printf("%s: ", __func__); kdebug_secpolicyindex(spidx, NULL)); } static void ipsec6_setspidx_ipaddr(const struct mbuf *m, struct secpolicyindex *spidx) { ipsec6_setsockaddrs(m, &spidx->src, &spidx->dst); spidx->prefs = sizeof(struct in6_addr) << 3; spidx->prefd = sizeof(struct in6_addr) << 3; } static struct secpolicy * ipsec6_getpolicy(const struct mbuf *m, struct inpcb *inp, u_int dir, int needport) { struct secpolicyindex spidx; struct secpolicy *sp; sp = ipsec_getpcbpolicy(inp, dir); if (sp == NULL && key_havesp(dir)) { /* Make an index to look for a policy. 
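The index carries the packet's addresses and prefix lengths, the upper-layer protocol and, when needport is set, the TCP/UDP ports, together with the lookup direction.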
*/ ipsec6_setspidx_ipaddr(m, &spidx); ipsec6_get_ulp(m, &spidx, needport); spidx.dir = dir; sp = key_allocsp(&spidx, dir); } if (sp == NULL) /* No SP found, use system default. */ sp = key_allocsp_default(); return (sp); } /* * Check security policy for *OUTBOUND* IPv6 packet. */ struct secpolicy * ipsec6_checkpolicy(const struct mbuf *m, struct inpcb *inp, int *error, int needport) { struct secpolicy *sp; *error = 0; sp = ipsec6_getpolicy(m, inp, IPSEC_DIR_OUTBOUND, needport); if (sp != NULL) sp = ipsec_checkpolicy(sp, inp, error); if (sp == NULL) { switch (*error) { case 0: /* No IPsec required: BYPASS or NONE */ break; case -EINVAL: IPSEC6STAT_INC(ips_out_polvio); break; default: IPSEC6STAT_INC(ips_out_inval); } } KEYDBG(IPSEC_STAMP, printf("%s: using SP(%p), error %d\n", __func__, sp, *error)); if (sp != NULL) KEYDBG(IPSEC_DATA, kdebug_secpolicy(sp)); return (sp); } /* * Check IPv6 packet against inbound security policy. * This function is called from tcp6_input(), udp6_input(), * rip6_input() and sctp_input(). */ int ipsec6_in_reject(const struct mbuf *m, struct inpcb *inp) { struct secpolicy *sp; int result; sp = ipsec6_getpolicy(m, inp, IPSEC_DIR_INBOUND, 0); result = ipsec_in_reject(sp, inp, m); key_freesp(&sp); if (result) IPSEC6STAT_INC(ips_in_polvio); return (result); } /* * IPSEC_CAP() method implementation for IPv6. */ int ipsec6_capability(struct mbuf *m, u_int cap) { switch (cap) { case IPSEC_CAP_BYPASS_FILTER: /* * Bypass packet filtering for packets previously handled * by IPsec. */ if (!V_ip6_filtertunnel && m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL) return (1); return (0); case IPSEC_CAP_OPERABLE: /* Do we have active security policies? */ if (key_havesp(IPSEC_DIR_INBOUND) != 0 || key_havesp(IPSEC_DIR_OUTBOUND) != 0) return (1); return (0); }; return (EOPNOTSUPP); } #endif /* INET6 */ int ipsec_run_hhooks(struct ipsec_ctx_data *ctx, int type) { int idx; switch (ctx->af) { #ifdef INET case AF_INET: idx = HHOOK_IPSEC_INET; break; #endif #ifdef INET6 case AF_INET6: idx = HHOOK_IPSEC_INET6; break; #endif default: return (EPFNOSUPPORT); } if (type == HHOOK_TYPE_IPSEC_IN) HHOOKS_RUN_IF(V_ipsec_hhh_in[idx], ctx, NULL); else HHOOKS_RUN_IF(V_ipsec_hhh_out[idx], ctx, NULL); if (*ctx->mp == NULL) return (EACCES); return (0); } /* * Return current level. * Either IPSEC_LEVEL_USE or IPSEC_LEVEL_REQUIRE are always returned. */ u_int ipsec_get_reqlevel(struct secpolicy *sp, u_int idx) { struct ipsecrequest *isr; u_int esp_trans_deflev, esp_net_deflev; u_int ah_trans_deflev, ah_net_deflev; u_int level = 0; IPSEC_ASSERT(idx < sp->tcount, ("Wrong IPsec request index %d", idx)); /* XXX Note that we have ipseclog() expanded here - code sync issue. */ #define IPSEC_CHECK_DEFAULT(lev) \ (((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE && \ (lev) != IPSEC_LEVEL_UNIQUE) \ ? (V_ipsec_debug ? \ log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\ (lev), IPSEC_LEVEL_REQUIRE) : 0), \ (lev) = IPSEC_LEVEL_REQUIRE, (lev) : (lev)) /* * IPsec VTI uses unique security policy with fake spidx filled * with zeroes. Just return IPSEC_LEVEL_REQUIRE instead of doing * full level lookup for such policies. */ if (sp->state == IPSEC_SPSTATE_IFNET) { IPSEC_ASSERT(sp->req[idx]->level == IPSEC_LEVEL_UNIQUE, ("Wrong IPsec request level %d", sp->req[idx]->level)); return (IPSEC_LEVEL_REQUIRE); } /* Set default level. 
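The per-address-family defaults come from the *_deflev sysctls; IPSEC_CHECK_DEFAULT() above forces any value other than USE, REQUIRE or UNIQUE back to IPSEC_LEVEL_REQUIRE, logging the fixup when IPsec debugging is enabled.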
*/ switch (sp->spidx.src.sa.sa_family) { #ifdef INET case AF_INET: esp_trans_deflev = IPSEC_CHECK_DEFAULT(V_ip4_esp_trans_deflev); esp_net_deflev = IPSEC_CHECK_DEFAULT(V_ip4_esp_net_deflev); ah_trans_deflev = IPSEC_CHECK_DEFAULT(V_ip4_ah_trans_deflev); ah_net_deflev = IPSEC_CHECK_DEFAULT(V_ip4_ah_net_deflev); break; #endif #ifdef INET6 case AF_INET6: esp_trans_deflev = IPSEC_CHECK_DEFAULT(V_ip6_esp_trans_deflev); esp_net_deflev = IPSEC_CHECK_DEFAULT(V_ip6_esp_net_deflev); ah_trans_deflev = IPSEC_CHECK_DEFAULT(V_ip6_ah_trans_deflev); ah_net_deflev = IPSEC_CHECK_DEFAULT(V_ip6_ah_net_deflev); break; #endif /* INET6 */ default: panic("%s: unknown af %u", __func__, sp->spidx.src.sa.sa_family); } #undef IPSEC_CHECK_DEFAULT isr = sp->req[idx]; /* Set level. */ switch (isr->level) { case IPSEC_LEVEL_DEFAULT: switch (isr->saidx.proto) { case IPPROTO_ESP: if (isr->saidx.mode == IPSEC_MODE_TUNNEL) level = esp_net_deflev; else level = esp_trans_deflev; break; case IPPROTO_AH: if (isr->saidx.mode == IPSEC_MODE_TUNNEL) level = ah_net_deflev; else level = ah_trans_deflev; break; case IPPROTO_IPCOMP: /* * We don't really care, as IPcomp document says that * we shouldn't compress small packets. */ level = IPSEC_LEVEL_USE; break; default: panic("%s: Illegal protocol defined %u\n", __func__, isr->saidx.proto); } break; case IPSEC_LEVEL_USE: case IPSEC_LEVEL_REQUIRE: level = isr->level; break; case IPSEC_LEVEL_UNIQUE: level = IPSEC_LEVEL_REQUIRE; break; default: panic("%s: Illegal IPsec level %u\n", __func__, isr->level); } return (level); } static int ipsec_check_history(const struct mbuf *m, struct secpolicy *sp, u_int idx) { struct xform_history *xh; struct m_tag *mtag; mtag = NULL; while ((mtag = m_tag_find(__DECONST(struct mbuf *, m), PACKET_TAG_IPSEC_IN_DONE, mtag)) != NULL) { xh = (struct xform_history *)(mtag + 1); KEYDBG(IPSEC_DATA, char buf[IPSEC_ADDRSTRLEN]; printf("%s: mode %s proto %u dst %s\n", __func__, kdebug_secasindex_mode(xh->mode), xh->proto, ipsec_address(&xh->dst, buf, sizeof(buf)))); if (xh->proto != sp->req[idx]->saidx.proto) continue; /* If SA had IPSEC_MODE_ANY, consider this as match. */ if (xh->mode != sp->req[idx]->saidx.mode && xh->mode != IPSEC_MODE_ANY) continue; /* * For transport mode IPsec request doesn't contain * addresses. We need to use address from spidx. */ if (sp->req[idx]->saidx.mode == IPSEC_MODE_TRANSPORT) { if (key_sockaddrcmp_withmask(&xh->dst.sa, &sp->spidx.dst.sa, sp->spidx.prefd) != 0) continue; } else { if (key_sockaddrcmp(&xh->dst.sa, &sp->req[idx]->saidx.dst.sa, 0) != 0) continue; } return (0); /* matched */ } return (1); } /* * Check security policy requirements against the actual * packet contents. Return one if the packet should be * reject as "invalid"; otherwiser return zero to have the * packet treated as "valid". * * OUT: * 0: valid * 1: invalid */ static int ipsec_in_reject(struct secpolicy *sp, struct inpcb *inp, const struct mbuf *m) { int i; KEYDBG(IPSEC_STAMP, printf("%s: PCB(%p): using SP(%p)\n", __func__, inp, sp)); KEYDBG(IPSEC_DATA, kdebug_secpolicy(sp)); if (inp != NULL && inp->inp_sp != NULL && inp->inp_sp->sp_in == NULL) ipsec_cachepolicy(inp, sp, IPSEC_DIR_INBOUND); /* Check policy. */ switch (sp->policy) { case IPSEC_POLICY_DISCARD: return (1); case IPSEC_POLICY_BYPASS: case IPSEC_POLICY_NONE: return (0); } IPSEC_ASSERT(sp->policy == IPSEC_POLICY_IPSEC, ("invalid policy %u", sp->policy)); /* * ipsec[46]_common_input_cb after each transform adds * PACKET_TAG_IPSEC_IN_DONE mbuf tag. 
It contains SPI, proto, mode * and destination address from saidx. We can compare info from * these tags with requirements in SP. */ for (i = 0; i < sp->tcount; i++) { /* * Do not check IPcomp, since IPcomp document * says that we shouldn't compress small packets. * IPComp policy should always be treated as being * in "use" level. */ if (sp->req[i]->saidx.proto == IPPROTO_IPCOMP || ipsec_get_reqlevel(sp, i) != IPSEC_LEVEL_REQUIRE) continue; if (V_check_policy_history != 0 && ipsec_check_history(m, sp, i) != 0) return (1); else switch (sp->req[i]->saidx.proto) { case IPPROTO_ESP: if ((m->m_flags & M_DECRYPTED) == 0) { KEYDBG(IPSEC_DUMP, printf("%s: ESP m_flags:%x\n", __func__, m->m_flags)); return (1); } break; case IPPROTO_AH: if ((m->m_flags & M_AUTHIPHDR) == 0) { KEYDBG(IPSEC_DUMP, printf("%s: AH m_flags:%x\n", __func__, m->m_flags)); return (1); } break; } } return (0); /* Valid. */ } /* * Compute the byte size to be occupied by IPsec header. * In case it is tunnelled, it includes the size of outer IP header. */ static size_t ipsec_hdrsiz_internal(struct secpolicy *sp) { size_t size; int i; KEYDBG(IPSEC_STAMP, printf("%s: using SP(%p)\n", __func__, sp)); KEYDBG(IPSEC_DATA, kdebug_secpolicy(sp)); switch (sp->policy) { case IPSEC_POLICY_DISCARD: case IPSEC_POLICY_BYPASS: case IPSEC_POLICY_NONE: return (0); } IPSEC_ASSERT(sp->policy == IPSEC_POLICY_IPSEC, ("invalid policy %u", sp->policy)); /* * XXX: for each transform we need to lookup suitable SA * and use info from SA to calculate headers size. * XXX: for NAT-T we need to cosider UDP header size. */ size = 0; for (i = 0; i < sp->tcount; i++) { switch (sp->req[i]->saidx.proto) { case IPPROTO_ESP: size += esp_hdrsiz(NULL); break; case IPPROTO_AH: size += ah_hdrsiz(NULL); break; case IPPROTO_IPCOMP: size += sizeof(struct ipcomp); break; } if (sp->req[i]->saidx.mode == IPSEC_MODE_TUNNEL) { switch (sp->req[i]->saidx.dst.sa.sa_family) { #ifdef INET case AF_INET: size += sizeof(struct ip); break; #endif #ifdef INET6 case AF_INET6: size += sizeof(struct ip6_hdr); break; #endif default: ipseclog((LOG_ERR, "%s: unknown AF %d in " "IPsec tunnel SA\n", __func__, sp->req[i]->saidx.dst.sa.sa_family)); break; } } } return (size); } /* * Compute ESP/AH header size for protocols with PCB, including * outer IP header. Currently only tcp_output() uses it. */ size_t ipsec_hdrsiz_inpcb(struct inpcb *inp) { struct secpolicyindex spidx; struct secpolicy *sp; size_t sz; sp = ipsec_getpcbpolicy(inp, IPSEC_DIR_OUTBOUND); if (sp == NULL && key_havesp(IPSEC_DIR_OUTBOUND)) { ipsec_setspidx_inpcb(inp, &spidx, IPSEC_DIR_OUTBOUND); sp = key_allocsp(&spidx, IPSEC_DIR_OUTBOUND); } if (sp == NULL) sp = key_allocsp_default(); sz = ipsec_hdrsiz_internal(sp); key_freesp(&sp); return (sz); } /* * Check the variable replay window. * ipsec_chkreplay() performs replay check before ICV verification. * ipsec_updatereplay() updates replay bitmap. This must be called after * ICV verification (it also performs replay check, which is usually done * beforehand). * 0 (zero) is returned if packet disallowed, 1 if packet permitted. * * Based on RFC 6479. Blocks are 32 bits unsigned integers */ #define IPSEC_BITMAP_INDEX_MASK(w) (w - 1) #define IPSEC_REDUNDANT_BIT_SHIFTS 5 #define IPSEC_REDUNDANT_BITS (1 << IPSEC_REDUNDANT_BIT_SHIFTS) #define IPSEC_BITMAP_LOC_MASK (IPSEC_REDUNDANT_BITS - 1) int ipsec_chkreplay(uint32_t seq, struct secasvar *sav) { const struct secreplay *replay; uint32_t wsizeb; /* Constant: window size. 
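The window is kept in bytes in replay->wsize and converted to bits below (wsizeb = wsize << 3); e.g. wsize = 8 gives a 64-packet window. A sequence number such as 70 then maps to bit (70 & 31) = 6 of 32-bit bitmap block ((70 >> 5) & (bitmap_size - 1)).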
*/ int index, bit_location; IPSEC_ASSERT(sav != NULL, ("Null SA")); IPSEC_ASSERT(sav->replay != NULL, ("Null replay state")); replay = sav->replay; /* No need to check replay if disabled. */ if (replay->wsize == 0) return (1); /* Constant. */ wsizeb = replay->wsize << 3; /* Sequence number of 0 is invalid. */ if (seq == 0) return (0); /* First time is always okay. */ if (replay->count == 0) return (1); /* Larger sequences are okay. */ if (seq > replay->lastseq) return (1); /* Over range to check, i.e. too old or wrapped. */ if (replay->lastseq - seq >= wsizeb) return (0); /* The sequence is inside the sliding window * now check the bit in the bitmap * bit location only depends on the sequence number */ bit_location = seq & IPSEC_BITMAP_LOC_MASK; index = (seq >> IPSEC_REDUNDANT_BIT_SHIFTS) & IPSEC_BITMAP_INDEX_MASK(replay->bitmap_size); /* This packet already seen? */ if ((replay->bitmap)[index] & (1 << bit_location)) return (0); return (1); } /* * Check replay counter whether to update or not. * OUT: 0: OK * 1: NG */ int ipsec_updatereplay(uint32_t seq, struct secasvar *sav) { char buf[128]; struct secreplay *replay; uint32_t wsizeb; /* Constant: window size. */ int diff, index, bit_location; IPSEC_ASSERT(sav != NULL, ("Null SA")); IPSEC_ASSERT(sav->replay != NULL, ("Null replay state")); replay = sav->replay; if (replay->wsize == 0) goto ok; /* No need to check replay. */ /* Constant. */ wsizeb = replay->wsize << 3; /* Sequence number of 0 is invalid. */ if (seq == 0) return (1); /* The packet is too old, no need to update */ if (wsizeb + seq < replay->lastseq) goto ok; /* Now update the bit */ index = (seq >> IPSEC_REDUNDANT_BIT_SHIFTS); /* First check if the sequence number is in the range */ if (seq > replay->lastseq) { int id; int index_cur = replay->lastseq >> IPSEC_REDUNDANT_BIT_SHIFTS; diff = index - index_cur; if (diff > replay->bitmap_size) { /* something unusual in this case */ diff = replay->bitmap_size; } for (id = 0; id < diff; ++id) { replay->bitmap[(id + index_cur + 1) & IPSEC_BITMAP_INDEX_MASK(replay->bitmap_size)] = 0; } replay->lastseq = seq; } index &= IPSEC_BITMAP_INDEX_MASK(replay->bitmap_size); bit_location = seq & IPSEC_BITMAP_LOC_MASK; /* this packet has already been received */ if (replay->bitmap[index] & (1 << bit_location)) return (1); replay->bitmap[index] |= (1 << bit_location); ok: if (replay->count == ~0) { /* Set overflow flag. */ replay->overflow++; /* Don't increment, no more packets accepted. */ if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) { if (sav->sah->saidx.proto == IPPROTO_AH) AHSTAT_INC(ahs_wrap); else if (sav->sah->saidx.proto == IPPROTO_ESP) ESPSTAT_INC(esps_wrap); return (1); } ipseclog((LOG_WARNING, "%s: replay counter made %d cycle. %s\n", __func__, replay->overflow, ipsec_sa2str(sav, buf, sizeof(buf)))); } return (0); } int ipsec_updateid(struct secasvar *sav, crypto_session_t *new, crypto_session_t *old) { crypto_session_t tmp; /* * tdb_cryptoid is initialized by xform_init(). * Then it can be changed only when some crypto error occurred or * when SA is deleted. We stored used cryptoid in the xform_data * structure. In case when crypto error occurred and crypto * subsystem has reinited the session, it returns new cryptoid * and EAGAIN error code. * * This function will be called when we got EAGAIN from crypto * subsystem. * *new is cryptoid that was returned by crypto subsystem in * the crp_sid. * *old is the original cryptoid that we stored in xform_data. 
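 * tdb_cryptoid is read and written only under SECASVAR_LOCK(), so two
 * callbacks cannot both install a new session here; the one that loses
 * the race takes the swap path below, and its caller then releases the
 * redundant session before redispatching.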
* * For first failed request *old == sav->tdb_cryptoid, then * we update sav->tdb_cryptoid and redo crypto_dispatch(). * For next failed request *old != sav->tdb_cryptoid, then * we store cryptoid from first request into the *new variable * and crp_sid from this second session will be returned via * *old pointer, so caller can release second session. * * XXXAE: check this more carefully. */ KEYDBG(IPSEC_STAMP, - printf("%s: SA(%p) moves cryptoid %jd -> %jd\n", - __func__, sav, (uintmax_t)(*old), (uintmax_t)(*new))); + printf("%s: SA(%p) moves cryptoid %p -> %p\n", + __func__, sav, *old, *new)); KEYDBG(IPSEC_DATA, kdebug_secasv(sav)); SECASVAR_LOCK(sav); if (sav->tdb_cryptoid != *old) { /* cryptoid was already updated */ tmp = *new; *new = sav->tdb_cryptoid; *old = tmp; SECASVAR_UNLOCK(sav); return (1); } sav->tdb_cryptoid = *new; SECASVAR_UNLOCK(sav); return (0); } int ipsec_initialized(void) { return (V_def_policy != NULL); } static void def_policy_init(const void *unused __unused) { V_def_policy = key_newsp(); if (V_def_policy != NULL) { V_def_policy->policy = IPSEC_POLICY_NONE; /* Force INPCB SP cache invalidation */ key_bumpspgen(); } else printf("%s: failed to initialize default policy\n", __func__); } static void def_policy_uninit(const void *unused __unused) { if (V_def_policy != NULL) { key_freesp(&V_def_policy); key_bumpspgen(); } } VNET_SYSINIT(def_policy_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_FIRST, def_policy_init, NULL); VNET_SYSUNINIT(def_policy_uninit, SI_SUB_PROTO_DOMAIN, SI_ORDER_FIRST, def_policy_uninit, NULL); Index: head/sys/netipsec/xform_ah.c =================================================================== --- head/sys/netipsec/xform_ah.c (revision 336438) +++ head/sys/netipsec/xform_ah.c (revision 336439) @@ -1,1172 +1,1171 @@ /* $FreeBSD$ */ /* $OpenBSD: ip_ah.c,v 1.63 2001/06/26 06:18:58 angelos Exp $ */ /*- * The authors of this code are John Ioannidis (ji@tla.org), * Angelos D. Keromytis (kermit@csd.uch.gr) and * Niels Provos (provos@physnet.uni-hamburg.de). * * The original version of this code was written by John Ioannidis * for BSD/OS in Athens, Greece, in November 1995. * * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996, * by Angelos D. Keromytis. * * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis * and Niels Provos. * * Additional features in 1999 by Angelos D. Keromytis and Niklas Hallqvist. * * Copyright (c) 1995, 1996, 1997, 1998, 1999 by John Ioannidis, * Angelos D. Keromytis and Niels Provos. * Copyright (c) 1999 Niklas Hallqvist. * Copyright (c) 2001 Angelos D. Keromytis. * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all copies of any software which is or includes a copy or * modification of this software. * You may use this code under the GNU public license if you so wish. Please * contribute changes back to the authors under this freer than GPL license * so that we may further the use of strong encryption without limitations to * all. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. 
*/ #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #include #include #endif #include #include #include /* * Return header size in bytes. The old protocol did not support * the replay counter; the new protocol always includes the counter. */ #define HDRSIZE(sav) \ (((sav)->flags & SADB_X_EXT_OLD) ? \ sizeof (struct ah) : sizeof (struct ah) + sizeof (u_int32_t)) /* * Return authenticator size in bytes, based on a field in the * algorithm descriptor. */ #define AUTHSIZE(sav) ((sav->flags & SADB_X_EXT_OLD) ? 16 : \ xform_ah_authsize((sav)->tdb_authalgxform)) VNET_DEFINE(int, ah_enable) = 1; /* control flow of packets with AH */ VNET_DEFINE(int, ah_cleartos) = 1; /* clear ip_tos when doing AH calc */ VNET_PCPUSTAT_DEFINE(struct ahstat, ahstat); VNET_PCPUSTAT_SYSINIT(ahstat); #ifdef VIMAGE VNET_PCPUSTAT_SYSUNINIT(ahstat); #endif /* VIMAGE */ #ifdef INET SYSCTL_DECL(_net_inet_ah); SYSCTL_INT(_net_inet_ah, OID_AUTO, ah_enable, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ah_enable), 0, ""); SYSCTL_INT(_net_inet_ah, OID_AUTO, ah_cleartos, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ah_cleartos), 0, ""); SYSCTL_VNET_PCPUSTAT(_net_inet_ah, IPSECCTL_STATS, stats, struct ahstat, ahstat, "AH statistics (struct ahstat, netipsec/ah_var.h)"); #endif static unsigned char ipseczeroes[256]; /* larger than an ip6 extension hdr */ static int ah_input_cb(struct cryptop*); static int ah_output_cb(struct cryptop*); int xform_ah_authsize(const struct auth_hash *esph) { int alen; if (esph == NULL) return 0; switch (esph->type) { case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: alen = esph->hashsize / 2; /* RFC4868 2.3 */ break; case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: alen = esph->hashsize; break; default: alen = AH_HMAC_HASHLEN; break; } return alen; } size_t ah_hdrsiz(struct secasvar *sav) { size_t size; if (sav != NULL) { int authsize, rplen, align; IPSEC_ASSERT(sav->tdb_authalgxform != NULL, ("null xform")); /*XXX not right for null algorithm--does it matter??*/ /* RFC4302: use the correct alignment. */ align = sizeof(uint32_t); #ifdef INET6 if (sav->sah->saidx.dst.sa.sa_family == AF_INET6) { align = sizeof(uint64_t); } #endif rplen = HDRSIZE(sav); authsize = AUTHSIZE(sav); size = roundup(rplen + authsize, align); } else { /* default guess */ size = sizeof (struct ah) + sizeof (u_int32_t) + 16; } return size; } /* * NB: public for use by esp_init. */ int ah_init0(struct secasvar *sav, struct xformsw *xsp, struct cryptoini *cria) { const struct auth_hash *thash; int keylen; thash = auth_algorithm_lookup(sav->alg_auth); if (thash == NULL) { DPRINTF(("%s: unsupported authentication algorithm %u\n", __func__, sav->alg_auth)); return EINVAL; } /* * Verify the replay state block allocation is consistent with * the protocol type. We check here so we can make assumptions * later during protocol processing. */ /* NB: replay state is setup elsewhere (sigh) */ if (((sav->flags&SADB_X_EXT_OLD) == 0) ^ (sav->replay != NULL)) { DPRINTF(("%s: replay state block inconsistency, " "%s algorithm %s replay state\n", __func__, (sav->flags & SADB_X_EXT_OLD) ? "old" : "new", sav->replay == NULL ? 
"without" : "with")); return EINVAL; } if (sav->key_auth == NULL) { DPRINTF(("%s: no authentication key for %s algorithm\n", __func__, thash->name)); return EINVAL; } keylen = _KEYLEN(sav->key_auth); if (keylen > thash->keysize && thash->keysize != 0) { DPRINTF(("%s: invalid keylength %d, algorithm %s requires " "keysize less than %d\n", __func__, keylen, thash->name, thash->keysize)); return EINVAL; } sav->tdb_xform = xsp; sav->tdb_authalgxform = thash; /* Initialize crypto session. */ bzero(cria, sizeof (*cria)); cria->cri_alg = sav->tdb_authalgxform->type; cria->cri_klen = _KEYBITS(sav->key_auth); cria->cri_key = sav->key_auth->key_data; cria->cri_mlen = AUTHSIZE(sav); return 0; } /* * ah_init() is called when an SPI is being set up. */ static int ah_init(struct secasvar *sav, struct xformsw *xsp) { struct cryptoini cria; int error; error = ah_init0(sav, xsp, &cria); return error ? error : crypto_newsession(&sav->tdb_cryptoid, &cria, V_crypto_support); } /* * Paranoia. * * NB: public for use by esp_zeroize (XXX). */ int ah_zeroize(struct secasvar *sav) { - int err; if (sav->key_auth) bzero(sav->key_auth->key_data, _KEYLEN(sav->key_auth)); - err = crypto_freesession(sav->tdb_cryptoid); - sav->tdb_cryptoid = 0; + crypto_freesession(sav->tdb_cryptoid); + sav->tdb_cryptoid = NULL; sav->tdb_authalgxform = NULL; sav->tdb_xform = NULL; - return err; + return 0; } /* * Massage IPv4/IPv6 headers for AH processing. */ static int ah_massage_headers(struct mbuf **m0, int proto, int skip, int alg, int out) { struct mbuf *m = *m0; unsigned char *ptr; int off, count; #ifdef INET struct ip *ip; #endif /* INET */ #ifdef INET6 struct ip6_ext *ip6e; struct ip6_hdr ip6; int ad, alloc, nxt, noff; #endif /* INET6 */ switch (proto) { #ifdef INET case AF_INET: /* * This is the least painful way of dealing with IPv4 header * and option processing -- just make sure they're in * contiguous memory. */ *m0 = m = m_pullup(m, skip); if (m == NULL) { DPRINTF(("%s: m_pullup failed\n", __func__)); return ENOBUFS; } /* Fix the IP header */ ip = mtod(m, struct ip *); if (V_ah_cleartos) ip->ip_tos = 0; ip->ip_ttl = 0; ip->ip_sum = 0; if (alg == CRYPTO_MD5_KPDK || alg == CRYPTO_SHA1_KPDK) ip->ip_off &= htons(IP_DF); else ip->ip_off = htons(0); ptr = mtod(m, unsigned char *); /* IPv4 option processing */ for (off = sizeof(struct ip); off < skip;) { if (ptr[off] == IPOPT_EOL || ptr[off] == IPOPT_NOP || off + 1 < skip) ; else { DPRINTF(("%s: illegal IPv4 option length for " "option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } switch (ptr[off]) { case IPOPT_EOL: off = skip; /* End the loop. */ break; case IPOPT_NOP: off++; break; case IPOPT_SECURITY: /* 0x82 */ case 0x85: /* Extended security. */ case 0x86: /* Commercial security. */ case 0x94: /* Router alert */ case 0x95: /* RFC1770 */ /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } off += ptr[off + 1]; break; case IPOPT_LSRR: case IPOPT_SSRR: /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } /* * On output, if we have either of the * source routing options, we should * swap the destination address of the * IP header with the last address * specified in the option, as that is * what the destination's IP header * will look like. 
*/ if (out) bcopy(ptr + off + ptr[off + 1] - sizeof(struct in_addr), &(ip->ip_dst), sizeof(struct in_addr)); /* Fall through */ default: /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } /* Zeroize all other options. */ count = ptr[off + 1]; bcopy(ipseczeroes, ptr + off, count); off += count; break; } /* Sanity check. */ if (off > skip) { DPRINTF(("%s: malformed IPv4 options header\n", __func__)); m_freem(m); return EINVAL; } } break; #endif /* INET */ #ifdef INET6 case AF_INET6: /* Ugly... */ /* Copy and "cook" the IPv6 header. */ m_copydata(m, 0, sizeof(ip6), (caddr_t) &ip6); /* We don't do IPv6 Jumbograms. */ if (ip6.ip6_plen == 0) { DPRINTF(("%s: unsupported IPv6 jumbogram\n", __func__)); m_freem(m); return EMSGSIZE; } ip6.ip6_flow = 0; ip6.ip6_hlim = 0; ip6.ip6_vfc &= ~IPV6_VERSION_MASK; ip6.ip6_vfc |= IPV6_VERSION; /* Scoped address handling. */ if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_src)) ip6.ip6_src.s6_addr16[1] = 0; if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_dst)) ip6.ip6_dst.s6_addr16[1] = 0; /* Done with IPv6 header. */ m_copyback(m, 0, sizeof(struct ip6_hdr), (caddr_t) &ip6); /* Let's deal with the remaining headers (if any). */ if (skip - sizeof(struct ip6_hdr) > 0) { if (m->m_len <= skip) { ptr = (unsigned char *) malloc( skip - sizeof(struct ip6_hdr), M_XDATA, M_NOWAIT); if (ptr == NULL) { DPRINTF(("%s: failed to allocate memory" "for IPv6 headers\n",__func__)); m_freem(m); return ENOBUFS; } /* * Copy all the protocol headers after * the IPv6 header. */ m_copydata(m, sizeof(struct ip6_hdr), skip - sizeof(struct ip6_hdr), ptr); alloc = 1; } else { /* No need to allocate memory. */ ptr = mtod(m, unsigned char *) + sizeof(struct ip6_hdr); alloc = 0; } } else break; nxt = ip6.ip6_nxt & 0xff; /* Next header type. */ for (off = 0; off < skip - sizeof(struct ip6_hdr);) switch (nxt) { case IPPROTO_HOPOPTS: case IPPROTO_DSTOPTS: ip6e = (struct ip6_ext *)(ptr + off); noff = off + ((ip6e->ip6e_len + 1) << 3); /* Sanity check. */ if (noff > skip - sizeof(struct ip6_hdr)) goto error6; /* * Zero out mutable options. */ for (count = off + sizeof(struct ip6_ext); count < noff;) { if (ptr[count] == IP6OPT_PAD1) { count++; continue; /* Skip padding. */ } ad = ptr[count + 1] + 2; if (count + ad > noff) goto error6; if (ptr[count] & IP6OPT_MUTABLE) memset(ptr + count, 0, ad); count += ad; } if (count != noff) goto error6; /* Advance. */ off += ((ip6e->ip6e_len + 1) << 3); nxt = ip6e->ip6e_nxt; break; case IPPROTO_ROUTING: /* * Always include routing headers in * computation. */ ip6e = (struct ip6_ext *) (ptr + off); off += ((ip6e->ip6e_len + 1) << 3); nxt = ip6e->ip6e_nxt; break; default: DPRINTF(("%s: unexpected IPv6 header type %d", __func__, off)); error6: if (alloc) free(ptr, M_XDATA); m_freem(m); return EINVAL; } /* Copyback and free, if we allocated. */ if (alloc) { m_copyback(m, sizeof(struct ip6_hdr), skip - sizeof(struct ip6_hdr), ptr); free(ptr, M_XDATA); } break; #endif /* INET6 */ } return 0; } /* * ah_input() gets called to verify that an input packet * passes authentication. 
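 * The skipped network headers, the AH header and the received ICV are
 * saved in the xform_data attached to the request, the mutable fields
 * are masked by ah_massage_headers(), and the request is queued with
 * crypto_dispatch(); verification finishes asynchronously in
 * ah_input_cb().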
*/ static int ah_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff) { IPSEC_DEBUG_DECLARE(char buf[128]); const struct auth_hash *ahx; struct cryptodesc *crda; struct cryptop *crp; struct xform_data *xd; struct newah *ah; crypto_session_t cryptoid; int hl, rplen, authsize, ahsize, error; IPSEC_ASSERT(sav != NULL, ("null SA")); IPSEC_ASSERT(sav->key_auth != NULL, ("null authentication key")); IPSEC_ASSERT(sav->tdb_authalgxform != NULL, ("null authentication xform")); /* Figure out header size. */ rplen = HDRSIZE(sav); /* XXX don't pullup, just copy header */ IP6_EXTHDR_GET(ah, struct newah *, m, skip, rplen); if (ah == NULL) { DPRINTF(("ah_input: cannot pullup header\n")); AHSTAT_INC(ahs_hdrops); /*XXX*/ error = ENOBUFS; goto bad; } /* Check replay window, if applicable. */ SECASVAR_LOCK(sav); if (sav->replay != NULL && sav->replay->wsize != 0 && ipsec_chkreplay(ntohl(ah->ah_seq), sav) == 0) { SECASVAR_UNLOCK(sav); AHSTAT_INC(ahs_replay); DPRINTF(("%s: packet replay failure: %s\n", __func__, ipsec_sa2str(sav, buf, sizeof(buf)))); error = EACCES; goto bad; } cryptoid = sav->tdb_cryptoid; SECASVAR_UNLOCK(sav); /* Verify AH header length. */ hl = sizeof(struct ah) + (ah->ah_len * sizeof (u_int32_t)); ahx = sav->tdb_authalgxform; authsize = AUTHSIZE(sav); ahsize = ah_hdrsiz(sav); if (hl != ahsize) { DPRINTF(("%s: bad authenticator length %u (expecting %lu)" " for packet in SA %s/%08lx\n", __func__, hl, (u_long)ahsize, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_badauthl); error = EACCES; goto bad; } if (skip + ahsize > m->m_pkthdr.len) { DPRINTF(("%s: bad mbuf length %u (expecting %lu)" " for packet in SA %s/%08lx\n", __func__, m->m_pkthdr.len, (u_long)(skip + ahsize), ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_badauthl); error = EACCES; goto bad; } AHSTAT_ADD(ahs_ibytes, m->m_pkthdr.len - skip - hl); /* Get crypto descriptors. */ crp = crypto_getreq(1); if (crp == NULL) { DPRINTF(("%s: failed to acquire crypto descriptor\n", __func__)); AHSTAT_INC(ahs_crypto); error = ENOBUFS; goto bad; } crda = crp->crp_desc; IPSEC_ASSERT(crda != NULL, ("null crypto descriptor")); crda->crd_skip = 0; crda->crd_len = m->m_pkthdr.len; crda->crd_inject = skip + rplen; /* Authentication operation. */ crda->crd_alg = ahx->type; crda->crd_klen = _KEYBITS(sav->key_auth); crda->crd_key = sav->key_auth->key_data; /* Allocate IPsec-specific opaque crypto info. */ xd = malloc(sizeof(*xd) + skip + rplen + authsize, M_XDATA, M_NOWAIT | M_ZERO); if (xd == NULL) { DPRINTF(("%s: failed to allocate xform_data\n", __func__)); AHSTAT_INC(ahs_crypto); crypto_freereq(crp); error = ENOBUFS; goto bad; } /* * Save the authenticator, the skipped portion of the packet, * and the AH header. */ m_copydata(m, 0, skip + rplen + authsize, (caddr_t)(xd + 1)); /* Zeroize the authenticator on the packet. */ m_copyback(m, skip + rplen, authsize, ipseczeroes); /* Save ah_nxt, since ah pointer can become invalid after "massage" */ hl = ah->ah_nxt; /* "Massage" the packet headers for crypto processing. */ error = ah_massage_headers(&m, sav->sah->saidx.dst.sa.sa_family, skip, ahx->type, 0); if (error != 0) { /* NB: mbuf is free'd by ah_massage_headers */ AHSTAT_INC(ahs_hdrops); free(xd, M_XDATA); crypto_freereq(crp); key_freesav(&sav); return (error); } /* Crypto operation descriptor. */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length. 
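In bytes. AH covers the whole (massaged) datagram: the authentication descriptor above spans the entire packet (crd_skip 0, crd_len m_pkthdr.len) and the computed ICV is placed at crd_inject = skip + rplen, where ah_input_cb() later picks it up for comparison.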
*/ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; if (V_async_crypto) crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; crp->crp_buf = (caddr_t) m; crp->crp_callback = ah_input_cb; - crp->crp_sid = cryptoid; + crp->crp_session = cryptoid; crp->crp_opaque = (caddr_t) xd; /* These are passed as-is to the callback. */ xd->sav = sav; xd->nxt = hl; xd->protoff = protoff; xd->skip = skip; xd->cryptoid = cryptoid; xd->vnet = curvnet; return (crypto_dispatch(crp)); bad: m_freem(m); key_freesav(&sav); return (error); } /* * AH input callback from the crypto driver. */ static int ah_input_cb(struct cryptop *crp) { IPSEC_DEBUG_DECLARE(char buf[IPSEC_ADDRSTRLEN]); unsigned char calc[AH_ALEN_MAX]; struct mbuf *m; struct xform_data *xd; struct secasvar *sav; struct secasindex *saidx; caddr_t ptr; crypto_session_t cryptoid; int authsize, rplen, ahsize, error, skip, protoff; uint8_t nxt; m = (struct mbuf *) crp->crp_buf; xd = (struct xform_data *) crp->crp_opaque; CURVNET_SET(xd->vnet); sav = xd->sav; skip = xd->skip; nxt = xd->nxt; protoff = xd->protoff; cryptoid = xd->cryptoid; saidx = &sav->sah->saidx; IPSEC_ASSERT(saidx->dst.sa.sa_family == AF_INET || saidx->dst.sa.sa_family == AF_INET6, ("unexpected protocol family %u", saidx->dst.sa.sa_family)); /* Check for crypto errors. */ if (crp->crp_etype) { if (crp->crp_etype == EAGAIN) { /* Reset the session ID */ - if (ipsec_updateid(sav, &crp->crp_sid, &cryptoid) != 0) + if (ipsec_updateid(sav, &crp->crp_session, &cryptoid) != 0) crypto_freesession(cryptoid); - xd->cryptoid = crp->crp_sid; + xd->cryptoid = crp->crp_session; CURVNET_RESTORE(); return (crypto_dispatch(crp)); } AHSTAT_INC(ahs_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; goto bad; } else { AHSTAT_INC(ahs_hist[sav->alg_auth]); crypto_freereq(crp); /* No longer needed. */ crp = NULL; } /* Shouldn't happen... */ if (m == NULL) { AHSTAT_INC(ahs_crypto); DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } /* Figure out header size. */ rplen = HDRSIZE(sav); authsize = AUTHSIZE(sav); ahsize = ah_hdrsiz(sav); /* Copy authenticator off the packet. */ m_copydata(m, skip + rplen, authsize, calc); /* Verify authenticator. */ ptr = (caddr_t) (xd + 1); if (timingsafe_bcmp(ptr + skip + rplen, calc, authsize)) { DPRINTF(("%s: authentication hash mismatch for packet " "in SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_badauth); error = EACCES; goto bad; } /* Fix the Next Protocol field. */ ((uint8_t *) ptr)[protoff] = nxt; /* Copyback the saved (uncooked) network headers. */ m_copyback(m, 0, skip, ptr); free(xd, M_XDATA), xd = NULL; /* No longer needed */ /* * Header is now authenticated. */ m->m_flags |= M_AUTHIPHDR|M_AUTHIPDGM; /* * Update replay sequence number, if appropriate. */ if (sav->replay) { u_int32_t seq; m_copydata(m, skip + offsetof(struct newah, ah_seq), sizeof (seq), (caddr_t) &seq); SECASVAR_LOCK(sav); if (ipsec_updatereplay(ntohl(seq), sav)) { SECASVAR_UNLOCK(sav); AHSTAT_INC(ahs_replay); error = EACCES; goto bad; } SECASVAR_UNLOCK(sav); } /* * Remove the AH header and authenticator from the mbuf. 
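 * m_striphdr() removes the ahsize bytes of AH header, ICV and alignment
 * padding found at offset skip, so the upper-layer payload again
 * directly follows the network headers before the packet is re-injected
 * through ipsec[46]_common_input_cb().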
*/ error = m_striphdr(m, skip, ahsize); if (error) { DPRINTF(("%s: mangled mbuf chain for SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_hdrops); goto bad; } switch (saidx->dst.sa.sa_family) { #ifdef INET6 case AF_INET6: error = ipsec6_common_input_cb(m, sav, skip, protoff); break; #endif #ifdef INET case AF_INET: error = ipsec4_common_input_cb(m, sav, skip, protoff); break; #endif default: panic("%s: Unexpected address family: %d saidx=%p", __func__, saidx->dst.sa.sa_family, saidx); } CURVNET_RESTORE(); return error; bad: CURVNET_RESTORE(); if (sav) key_freesav(&sav); if (m != NULL) m_freem(m); if (xd != NULL) free(xd, M_XDATA); if (crp != NULL) crypto_freereq(crp); return error; } /* * AH output routine, called by ipsec[46]_perform_request(). */ static int ah_output(struct mbuf *m, struct secpolicy *sp, struct secasvar *sav, u_int idx, int skip, int protoff) { IPSEC_DEBUG_DECLARE(char buf[IPSEC_ADDRSTRLEN]); const struct auth_hash *ahx; struct cryptodesc *crda; struct xform_data *xd; struct mbuf *mi; struct cryptop *crp; struct newah *ah; crypto_session_t cryptoid; uint16_t iplen; int error, rplen, authsize, ahsize, maxpacketsize, roff; uint8_t prot; IPSEC_ASSERT(sav != NULL, ("null SA")); ahx = sav->tdb_authalgxform; IPSEC_ASSERT(ahx != NULL, ("null authentication xform")); AHSTAT_INC(ahs_output); /* Figure out header size. */ rplen = HDRSIZE(sav); authsize = AUTHSIZE(sav); ahsize = ah_hdrsiz(sav); /* Check for maximum packet size violations. */ switch (sav->sah->saidx.dst.sa.sa_family) { #ifdef INET case AF_INET: maxpacketsize = IP_MAXPACKET; break; #endif /* INET */ #ifdef INET6 case AF_INET6: maxpacketsize = IPV6_MAXPACKET; break; #endif /* INET6 */ default: DPRINTF(("%s: unknown/unsupported protocol family %u, " "SA %s/%08lx\n", __func__, sav->sah->saidx.dst.sa.sa_family, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_nopf); error = EPFNOSUPPORT; goto bad; } if (ahsize + m->m_pkthdr.len > maxpacketsize) { DPRINTF(("%s: packet in SA %s/%08lx got too big " "(len %u, max len %u)\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi), ahsize + m->m_pkthdr.len, maxpacketsize)); AHSTAT_INC(ahs_toobig); error = EMSGSIZE; goto bad; } /* Update the counters. */ AHSTAT_ADD(ahs_obytes, m->m_pkthdr.len - skip); m = m_unshare(m, M_NOWAIT); if (m == NULL) { DPRINTF(("%s: cannot clone mbuf chain, SA %s/%08lx\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_hdrops); error = ENOBUFS; goto bad; } /* Inject AH header. */ mi = m_makespace(m, skip, ahsize, &roff); if (mi == NULL) { DPRINTF(("%s: failed to inject %u byte AH header for SA " "%s/%08lx\n", __func__, ahsize, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_hdrops); /*XXX differs from openbsd */ error = ENOBUFS; goto bad; } /* * The AH header is guaranteed by m_makespace() to be in * contiguous memory, at roff bytes offset into the returned mbuf. */ ah = (struct newah *)(mtod(mi, caddr_t) + roff); /* Initialize the AH header. */ m_copydata(m, protoff, sizeof(u_int8_t), (caddr_t) &ah->ah_nxt); ah->ah_len = (ahsize - sizeof(struct ah)) / sizeof(u_int32_t); ah->ah_reserve = 0; ah->ah_spi = sav->spi; /* Zeroize authenticator. 
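RFC 4302 requires the ICV field to be zero while the ICV is being computed; the driver writes the real value into the packet at crd_inject (skip + rplen) once the request completes.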
*/ m_copyback(m, skip + rplen, authsize, ipseczeroes); /* Zeroize padding */ m_copyback(m, skip + rplen + authsize, ahsize - (rplen + authsize), ipseczeroes); /* Insert packet replay counter, as requested. */ SECASVAR_LOCK(sav); if (sav->replay) { if (sav->replay->count == ~0 && (sav->flags & SADB_X_EXT_CYCSEQ) == 0) { SECASVAR_UNLOCK(sav); DPRINTF(("%s: replay counter wrapped for SA %s/%08lx\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_wrap); error = EACCES; goto bad; } #ifdef REGRESSION /* Emulate replay attack when ipsec_replay is TRUE. */ if (!V_ipsec_replay) #endif sav->replay->count++; ah->ah_seq = htonl(sav->replay->count); } cryptoid = sav->tdb_cryptoid; SECASVAR_UNLOCK(sav); /* Get crypto descriptors. */ crp = crypto_getreq(1); if (crp == NULL) { DPRINTF(("%s: failed to acquire crypto descriptors\n", __func__)); AHSTAT_INC(ahs_crypto); error = ENOBUFS; goto bad; } crda = crp->crp_desc; crda->crd_skip = 0; crda->crd_inject = skip + rplen; crda->crd_len = m->m_pkthdr.len; /* Authentication operation. */ crda->crd_alg = ahx->type; crda->crd_key = sav->key_auth->key_data; crda->crd_klen = _KEYBITS(sav->key_auth); /* Allocate IPsec-specific opaque crypto info. */ xd = malloc(sizeof(struct xform_data) + skip, M_XDATA, M_NOWAIT | M_ZERO); if (xd == NULL) { crypto_freereq(crp); DPRINTF(("%s: failed to allocate xform_data\n", __func__)); AHSTAT_INC(ahs_crypto); error = ENOBUFS; goto bad; } /* Save the skipped portion of the packet. */ m_copydata(m, 0, skip, (caddr_t) (xd + 1)); /* * Fix IP header length on the header used for * authentication. We don't need to fix the original * header length as it will be fixed by our caller. */ switch (sav->sah->saidx.dst.sa.sa_family) { #ifdef INET case AF_INET: bcopy(((caddr_t)(xd + 1)) + offsetof(struct ip, ip_len), (caddr_t) &iplen, sizeof(u_int16_t)); iplen = htons(ntohs(iplen) + ahsize); m_copyback(m, offsetof(struct ip, ip_len), sizeof(u_int16_t), (caddr_t) &iplen); break; #endif /* INET */ #ifdef INET6 case AF_INET6: bcopy(((caddr_t)(xd + 1)) + offsetof(struct ip6_hdr, ip6_plen), (caddr_t) &iplen, sizeof(uint16_t)); iplen = htons(ntohs(iplen) + ahsize); m_copyback(m, offsetof(struct ip6_hdr, ip6_plen), sizeof(uint16_t), (caddr_t) &iplen); break; #endif /* INET6 */ } /* Fix the Next Header field in saved header. */ ((uint8_t *) (xd + 1))[protoff] = IPPROTO_AH; /* Update the Next Protocol field in the IP header. */ prot = IPPROTO_AH; m_copyback(m, protoff, sizeof(uint8_t), (caddr_t) &prot); /* "Massage" the packet headers for crypto processing. */ error = ah_massage_headers(&m, sav->sah->saidx.dst.sa.sa_family, skip, ahx->type, 1); if (error != 0) { m = NULL; /* mbuf was free'd by ah_massage_headers. */ free(xd, M_XDATA); crypto_freereq(crp); goto bad; } /* Crypto operation descriptor. */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; if (V_async_crypto) crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; crp->crp_buf = (caddr_t) m; crp->crp_callback = ah_output_cb; - crp->crp_sid = cryptoid; + crp->crp_session = cryptoid; crp->crp_opaque = (caddr_t) xd; /* These are passed as-is to the callback. */ xd->sp = sp; xd->sav = sav; xd->skip = skip; xd->idx = idx; xd->cryptoid = cryptoid; xd->vnet = curvnet; return crypto_dispatch(crp); bad: if (m) m_freem(m); key_freesav(&sav); key_freesp(&sp); return (error); } /* * AH output callback from the crypto driver. 
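 * On success the saved (unmassaged) headers are copied back over the
 * packet and transmission continues via ipsec_process_done(); on EAGAIN
 * the SA's crypto session is refreshed with ipsec_updateid() and the
 * request is redispatched.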
*/ static int ah_output_cb(struct cryptop *crp) { struct xform_data *xd; struct secpolicy *sp; struct secasvar *sav; struct mbuf *m; crypto_session_t cryptoid; caddr_t ptr; u_int idx; int skip, error; m = (struct mbuf *) crp->crp_buf; xd = (struct xform_data *) crp->crp_opaque; CURVNET_SET(xd->vnet); sp = xd->sp; sav = xd->sav; skip = xd->skip; idx = xd->idx; cryptoid = xd->cryptoid; ptr = (caddr_t) (xd + 1); /* Check for crypto errors. */ if (crp->crp_etype) { if (crp->crp_etype == EAGAIN) { /* Reset the session ID */ - if (ipsec_updateid(sav, &crp->crp_sid, &cryptoid) != 0) + if (ipsec_updateid(sav, &crp->crp_session, &cryptoid) != 0) crypto_freesession(cryptoid); - xd->cryptoid = crp->crp_sid; + xd->cryptoid = crp->crp_session; CURVNET_RESTORE(); return (crypto_dispatch(crp)); } AHSTAT_INC(ahs_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; m_freem(m); goto bad; } /* Shouldn't happen... */ if (m == NULL) { AHSTAT_INC(ahs_crypto); DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } /* * Copy original headers (with the new protocol number) back * in place. */ m_copyback(m, 0, skip, ptr); free(xd, M_XDATA); crypto_freereq(crp); AHSTAT_INC(ahs_hist[sav->alg_auth]); #ifdef REGRESSION /* Emulate man-in-the-middle attack when ipsec_integrity is TRUE. */ if (V_ipsec_integrity) { int alen; /* * Corrupt HMAC if we want to test integrity verification of * the other side. */ alen = AUTHSIZE(sav); m_copyback(m, m->m_pkthdr.len - alen, alen, ipseczeroes); } #endif /* NB: m is reclaimed by ipsec_process_done. */ error = ipsec_process_done(m, sp, sav, idx); CURVNET_RESTORE(); return (error); bad: CURVNET_RESTORE(); free(xd, M_XDATA); crypto_freereq(crp); key_freesav(&sav); key_freesp(&sp); return (error); } static struct xformsw ah_xformsw = { .xf_type = XF_AH, .xf_name = "IPsec AH", .xf_init = ah_init, .xf_zeroize = ah_zeroize, .xf_input = ah_input, .xf_output = ah_output, }; SYSINIT(ah_xform_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, xform_attach, &ah_xformsw); SYSUNINIT(ah_xform_uninit, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, xform_detach, &ah_xformsw); Index: head/sys/netipsec/xform_esp.c =================================================================== --- head/sys/netipsec/xform_esp.c (revision 336438) +++ head/sys/netipsec/xform_esp.c (revision 336439) @@ -1,975 +1,975 @@ /* $FreeBSD$ */ /* $OpenBSD: ip_esp.c,v 1.69 2001/06/26 06:18:59 angelos Exp $ */ /*- * The authors of this code are John Ioannidis (ji@tla.org), * Angelos D. Keromytis (kermit@csd.uch.gr) and * Niels Provos (provos@physnet.uni-hamburg.de). * * The original version of this code was written by John Ioannidis * for BSD/OS in Athens, Greece, in November 1995. * * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996, * by Angelos D. Keromytis. * * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis * and Niels Provos. * * Additional features in 1999 by Angelos D. Keromytis. * * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis, * Angelos D. Keromytis and Niels Provos. * Copyright (c) 2001 Angelos D. Keromytis. * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all copies of any software which is or includes a copy or * modification of this software. * You may use this code under the GNU public license if you so wish. 
Please * contribute changes back to the authors under this freer than GPL license * so that we may further the use of strong encryption without limitations to * all. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #include #include #endif #include #include #include #include VNET_DEFINE(int, esp_enable) = 1; VNET_PCPUSTAT_DEFINE(struct espstat, espstat); VNET_PCPUSTAT_SYSINIT(espstat); #ifdef VIMAGE VNET_PCPUSTAT_SYSUNINIT(espstat); #endif /* VIMAGE */ SYSCTL_DECL(_net_inet_esp); SYSCTL_INT(_net_inet_esp, OID_AUTO, esp_enable, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(esp_enable), 0, ""); SYSCTL_VNET_PCPUSTAT(_net_inet_esp, IPSECCTL_STATS, stats, struct espstat, espstat, "ESP statistics (struct espstat, netipsec/esp_var.h"); static int esp_input_cb(struct cryptop *op); static int esp_output_cb(struct cryptop *crp); size_t esp_hdrsiz(struct secasvar *sav) { size_t size; if (sav != NULL) { /*XXX not right for null algorithm--does it matter??*/ IPSEC_ASSERT(sav->tdb_encalgxform != NULL, ("SA with null xform")); if (sav->flags & SADB_X_EXT_OLD) size = sizeof (struct esp); else size = sizeof (struct newesp); size += sav->tdb_encalgxform->blocksize + 9; /*XXX need alg check???*/ if (sav->tdb_authalgxform != NULL && sav->replay) size += ah_hdrsiz(sav); } else { /* * base header size * + max iv length for CBC mode * + max pad length * + sizeof (pad length field) * + sizeof (next header field) * + max icv supported. */ size = sizeof (struct newesp) + EALG_MAX_BLOCK_LEN + 9 + 16; } return size; } /* * esp_init() is called when an SPI is being set up. */ static int esp_init(struct secasvar *sav, struct xformsw *xsp) { const struct enc_xform *txform; struct cryptoini cria, crie; int keylen; int error; txform = enc_algorithm_lookup(sav->alg_enc); if (txform == NULL) { DPRINTF(("%s: unsupported encryption algorithm %d\n", __func__, sav->alg_enc)); return EINVAL; } if (sav->key_enc == NULL) { DPRINTF(("%s: no encoding key for %s algorithm\n", __func__, txform->name)); return EINVAL; } if ((sav->flags & (SADB_X_EXT_OLD | SADB_X_EXT_IV4B)) == SADB_X_EXT_IV4B) { DPRINTF(("%s: 4-byte IV not supported with protocol\n", __func__)); return EINVAL; } /* subtract off the salt, RFC4106, 8.1 and RFC3686, 5.1 */ keylen = _KEYLEN(sav->key_enc) - SAV_ISCTRORGCM(sav) * 4; if (txform->minkey > keylen || keylen > txform->maxkey) { DPRINTF(("%s: invalid key length %u, must be in the range " "[%u..%u] for algorithm %s\n", __func__, keylen, txform->minkey, txform->maxkey, txform->name)); return EINVAL; } if (SAV_ISCTRORGCM(sav)) sav->ivlen = 8; /* RFC4106 3.1 and RFC3686 3.1 */ else sav->ivlen = txform->ivsize; /* * Setup AH-related state. */ if (sav->alg_auth != 0) { error = ah_init0(sav, xsp, &cria); if (error) return error; } /* NB: override anything set in ah_init0 */ sav->tdb_xform = xsp; sav->tdb_encalgxform = txform; /* * Whenever AES-GCM is used for encryption, one * of the AES authentication algorithms is chosen * as well, based on the key size. 
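 * The GMAC side reuses the encryption key: cria.cri_key below points at
 * the same key_data, and cri_klen, like the cipher side, excludes the
 * 32-bit salt carried at the tail of the key (RFC 4106, section 8.1).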
*/ if (sav->alg_enc == SADB_X_EALG_AESGCM16) { switch (keylen) { case AES_128_GMAC_KEY_LEN: sav->alg_auth = SADB_X_AALG_AES128GMAC; sav->tdb_authalgxform = &auth_hash_nist_gmac_aes_128; break; case AES_192_GMAC_KEY_LEN: sav->alg_auth = SADB_X_AALG_AES192GMAC; sav->tdb_authalgxform = &auth_hash_nist_gmac_aes_192; break; case AES_256_GMAC_KEY_LEN: sav->alg_auth = SADB_X_AALG_AES256GMAC; sav->tdb_authalgxform = &auth_hash_nist_gmac_aes_256; break; default: DPRINTF(("%s: invalid key length %u" "for algorithm %s\n", __func__, keylen, txform->name)); return EINVAL; } bzero(&cria, sizeof(cria)); cria.cri_alg = sav->tdb_authalgxform->type; cria.cri_key = sav->key_enc->key_data; cria.cri_klen = _KEYBITS(sav->key_enc) - SAV_ISGCM(sav) * 32; } /* Initialize crypto session. */ bzero(&crie, sizeof(crie)); crie.cri_alg = sav->tdb_encalgxform->type; crie.cri_key = sav->key_enc->key_data; crie.cri_klen = _KEYBITS(sav->key_enc) - SAV_ISCTRORGCM(sav) * 32; if (sav->tdb_authalgxform && sav->tdb_encalgxform) { /* init both auth & enc */ crie.cri_next = &cria; error = crypto_newsession(&sav->tdb_cryptoid, &crie, V_crypto_support); } else if (sav->tdb_encalgxform) { error = crypto_newsession(&sav->tdb_cryptoid, &crie, V_crypto_support); } else if (sav->tdb_authalgxform) { error = crypto_newsession(&sav->tdb_cryptoid, &cria, V_crypto_support); } else { /* XXX cannot happen? */ DPRINTF(("%s: no encoding OR authentication xform!\n", __func__)); error = EINVAL; } return error; } /* * Paranoia. */ static int esp_zeroize(struct secasvar *sav) { /* NB: ah_zerorize free's the crypto session state */ int error = ah_zeroize(sav); if (sav->key_enc) bzero(sav->key_enc->key_data, _KEYLEN(sav->key_enc)); sav->tdb_encalgxform = NULL; sav->tdb_xform = NULL; return error; } /* * ESP input processing, called (eventually) through the protocol switch. */ static int esp_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff) { IPSEC_DEBUG_DECLARE(char buf[128]); const struct auth_hash *esph; const struct enc_xform *espx; struct xform_data *xd; struct cryptodesc *crde; struct cryptop *crp; struct newesp *esp; uint8_t *ivp; crypto_session_t cryptoid; int alen, error, hlen, plen; IPSEC_ASSERT(sav != NULL, ("null SA")); IPSEC_ASSERT(sav->tdb_encalgxform != NULL, ("null encoding xform")); error = EINVAL; /* Valid IP Packet length ? */ if ( (skip&3) || (m->m_pkthdr.len&3) ){ DPRINTF(("%s: misaligned packet, skip %u pkt len %u", __func__, skip, m->m_pkthdr.len)); ESPSTAT_INC(esps_badilen); goto bad; } /* XXX don't pullup, just copy header */ IP6_EXTHDR_GET(esp, struct newesp *, m, skip, sizeof (struct newesp)); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; /* Determine the ESP header and auth length */ if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; alen = xform_ah_authsize(esph); /* * Verify payload length is multiple of encryption algorithm * block size. * * NB: This works for the null algorithm because the blocksize * is 4 and all packets must be 4-byte aligned regardless * of the algorithm. */ plen = m->m_pkthdr.len - (skip + hlen + alen); if ((plen & (espx->blocksize - 1)) || (plen <= 0)) { DPRINTF(("%s: payload of %d octets not a multiple of %d octets," " SA %s/%08lx\n", __func__, plen, espx->blocksize, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long)ntohl(sav->spi))); ESPSTAT_INC(esps_badilen); goto bad; } /* * Check sequence number. 
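 * This is only a preliminary window check; the replay bitmap itself is
 * updated in esp_input_cb(), after the ICV has been verified (see
 * ipsec_updatereplay()).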
*/ SECASVAR_LOCK(sav); if (esph != NULL && sav->replay != NULL && sav->replay->wsize != 0) { if (ipsec_chkreplay(ntohl(esp->esp_seq), sav) == 0) { SECASVAR_UNLOCK(sav); DPRINTF(("%s: packet replay check for %s\n", __func__, ipsec_sa2str(sav, buf, sizeof(buf)))); ESPSTAT_INC(esps_replay); error = EACCES; goto bad; } } cryptoid = sav->tdb_cryptoid; SECASVAR_UNLOCK(sav); /* Update the counters */ ESPSTAT_ADD(esps_ibytes, m->m_pkthdr.len - (skip + hlen + alen)); /* Get crypto descriptors */ crp = crypto_getreq(esph && espx ? 2 : 1); if (crp == NULL) { DPRINTF(("%s: failed to acquire crypto descriptors\n", __func__)); ESPSTAT_INC(esps_crypto); error = ENOBUFS; goto bad; } /* Get IPsec-specific opaque pointer */ xd = malloc(sizeof(*xd) + alen, M_XDATA, M_NOWAIT | M_ZERO); if (xd == NULL) { DPRINTF(("%s: failed to allocate xform_data\n", __func__)); ESPSTAT_INC(esps_crypto); crypto_freereq(crp); error = ENOBUFS; goto bad; } if (esph != NULL) { struct cryptodesc *crda = crp->crp_desc; IPSEC_ASSERT(crda != NULL, ("null ah crypto descriptor")); /* Authentication descriptor */ crda->crd_skip = skip; if (SAV_ISGCM(sav)) crda->crd_len = 8; /* RFC4106 5, SPI + SN */ else crda->crd_len = m->m_pkthdr.len - (skip + alen); crda->crd_inject = m->m_pkthdr.len - alen; crda->crd_alg = esph->type; /* Copy the authenticator */ m_copydata(m, m->m_pkthdr.len - alen, alen, (caddr_t) (xd + 1)); /* Chain authentication request */ crde = crda->crd_next; } else { crde = crp->crp_desc; } /* Crypto operation descriptor */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length */ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; if (V_async_crypto) crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; crp->crp_buf = (caddr_t) m; crp->crp_callback = esp_input_cb; - crp->crp_sid = cryptoid; + crp->crp_session = cryptoid; crp->crp_opaque = (caddr_t) xd; /* These are passed as-is to the callback */ xd->sav = sav; xd->protoff = protoff; xd->skip = skip; xd->cryptoid = cryptoid; xd->vnet = curvnet; /* Decryption descriptor */ IPSEC_ASSERT(crde != NULL, ("null esp crypto descriptor")); crde->crd_skip = skip + hlen; crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen); crde->crd_inject = skip + hlen - sav->ivlen; if (SAV_ISCTRORGCM(sav)) { ivp = &crde->crd_iv[0]; /* GCM IV Format: RFC4106 4 */ /* CTR IV Format: RFC3686 4 */ /* Salt is last four bytes of key, RFC4106 8.1 */ /* Nonce is last four bytes of key, RFC3686 5.1 */ memcpy(ivp, sav->key_enc->key_data + _KEYLEN(sav->key_enc) - 4, 4); if (SAV_ISCTR(sav)) { /* Initial block counter is 1, RFC3686 4 */ be32enc(&ivp[sav->ivlen + 4], 1); } m_copydata(m, skip + hlen - sav->ivlen, sav->ivlen, &ivp[4]); crde->crd_flags |= CRD_F_IV_EXPLICIT; } crde->crd_alg = espx->type; return (crypto_dispatch(crp)); bad: m_freem(m); key_freesav(&sav); return (error); } /* * ESP input callback from the crypto driver. 
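 *
 * Note that a crp_etype of EAGAIN is not a hard failure: it indicates the
 * framework had to move the session to a new handle (for instance because
 * the driver that owned it was unregistered).  The callback below
 * refreshes the SA's cached crypto_session_t through ipsec_updateid() and
 * simply re-dispatches the same request.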
*/ static int esp_input_cb(struct cryptop *crp) { IPSEC_DEBUG_DECLARE(char buf[128]); u_int8_t lastthree[3], aalg[AH_HMAC_MAXHASHLEN]; const struct auth_hash *esph; struct mbuf *m; struct cryptodesc *crd; struct xform_data *xd; struct secasvar *sav; struct secasindex *saidx; caddr_t ptr; crypto_session_t cryptoid; int hlen, skip, protoff, error, alen; crd = crp->crp_desc; IPSEC_ASSERT(crd != NULL, ("null crypto descriptor!")); m = (struct mbuf *) crp->crp_buf; xd = (struct xform_data *) crp->crp_opaque; CURVNET_SET(xd->vnet); sav = xd->sav; skip = xd->skip; protoff = xd->protoff; cryptoid = xd->cryptoid; saidx = &sav->sah->saidx; esph = sav->tdb_authalgxform; /* Check for crypto errors */ if (crp->crp_etype) { if (crp->crp_etype == EAGAIN) { /* Reset the session ID */ - if (ipsec_updateid(sav, &crp->crp_sid, &cryptoid) != 0) + if (ipsec_updateid(sav, &crp->crp_session, &cryptoid) != 0) crypto_freesession(cryptoid); - xd->cryptoid = crp->crp_sid; + xd->cryptoid = crp->crp_session; CURVNET_RESTORE(); return (crypto_dispatch(crp)); } ESPSTAT_INC(esps_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; goto bad; } /* Shouldn't happen... */ if (m == NULL) { ESPSTAT_INC(esps_crypto); DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } ESPSTAT_INC(esps_hist[sav->alg_enc]); /* If authentication was performed, check now. */ if (esph != NULL) { alen = xform_ah_authsize(esph); AHSTAT_INC(ahs_hist[sav->alg_auth]); /* Copy the authenticator from the packet */ m_copydata(m, m->m_pkthdr.len - alen, alen, aalg); ptr = (caddr_t) (xd + 1); /* Verify authenticator */ if (timingsafe_bcmp(ptr, aalg, alen) != 0) { DPRINTF(("%s: authentication hash mismatch for " "packet in SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); ESPSTAT_INC(esps_badauth); error = EACCES; goto bad; } m->m_flags |= M_AUTHIPDGM; /* Remove trailing authenticator */ m_adj(m, -alen); } /* Release the crypto descriptors */ free(xd, M_XDATA), xd = NULL; crypto_freereq(crp), crp = NULL; /* * Packet is now decrypted. */ m->m_flags |= M_DECRYPTED; /* * Update replay sequence number, if appropriate. */ if (sav->replay) { u_int32_t seq; m_copydata(m, skip + offsetof(struct newesp, esp_seq), sizeof (seq), (caddr_t) &seq); SECASVAR_LOCK(sav); if (ipsec_updatereplay(ntohl(seq), sav)) { SECASVAR_UNLOCK(sav); DPRINTF(("%s: packet replay check for %s\n", __func__, ipsec_sa2str(sav, buf, sizeof(buf)))); ESPSTAT_INC(esps_replay); error = EACCES; goto bad; } SECASVAR_UNLOCK(sav); } /* Determine the ESP header length */ if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; /* Remove the ESP header and IV from the mbuf. 
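 *
 * After decryption the packet ends with the ESP trailer; the code below
 * reads back its last three bytes as
 *
 *     lastthree[0]   last padding byte
 *     lastthree[1]   pad length
 *     lastthree[2]   next protocol header
 *
 * With self-describing padding (pad byte i holds i + 1), a non-zero pad
 * length must therefore equal the last padding byte, which is what the
 * decryption sanity check further down relies on.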
*/ error = m_striphdr(m, skip, hlen); if (error) { ESPSTAT_INC(esps_hdrops); DPRINTF(("%s: bad mbuf chain, SA %s/%08lx\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); goto bad; } /* Save the last three bytes of decrypted data */ m_copydata(m, m->m_pkthdr.len - 3, 3, lastthree); /* Verify pad length */ if (lastthree[1] + 2 > m->m_pkthdr.len - skip) { ESPSTAT_INC(esps_badilen); DPRINTF(("%s: invalid padding length %d for %u byte packet " "in SA %s/%08lx\n", __func__, lastthree[1], m->m_pkthdr.len - skip, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); error = EINVAL; goto bad; } /* Verify correct decryption by checking the last padding bytes */ if ((sav->flags & SADB_X_EXT_PMASK) != SADB_X_EXT_PRAND) { if (lastthree[1] != lastthree[0] && lastthree[1] != 0) { ESPSTAT_INC(esps_badenc); DPRINTF(("%s: decryption failed for packet in " "SA %s/%08lx\n", __func__, ipsec_address( &sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); error = EINVAL; goto bad; } } /* Trim the mbuf chain to remove trailing authenticator and padding */ m_adj(m, -(lastthree[1] + 2)); /* Restore the Next Protocol field */ m_copyback(m, protoff, sizeof (u_int8_t), lastthree + 2); switch (saidx->dst.sa.sa_family) { #ifdef INET6 case AF_INET6: error = ipsec6_common_input_cb(m, sav, skip, protoff); break; #endif #ifdef INET case AF_INET: error = ipsec4_common_input_cb(m, sav, skip, protoff); break; #endif default: panic("%s: Unexpected address family: %d saidx=%p", __func__, saidx->dst.sa.sa_family, saidx); } CURVNET_RESTORE(); return error; bad: CURVNET_RESTORE(); if (sav != NULL) key_freesav(&sav); if (m != NULL) m_freem(m); if (xd != NULL) free(xd, M_XDATA); if (crp != NULL) crypto_freereq(crp); return error; } /* * ESP output routine, called by ipsec[46]_perform_request(). */ static int esp_output(struct mbuf *m, struct secpolicy *sp, struct secasvar *sav, u_int idx, int skip, int protoff) { IPSEC_DEBUG_DECLARE(char buf[IPSEC_ADDRSTRLEN]); struct cryptodesc *crde = NULL, *crda = NULL; struct cryptop *crp; const struct auth_hash *esph; const struct enc_xform *espx; struct mbuf *mo = NULL; struct xform_data *xd; struct secasindex *saidx; unsigned char *pad; uint8_t *ivp; uint64_t cntr; crypto_session_t cryptoid; int hlen, rlen, padding, blks, alen, i, roff; int error, maxpacketsize; uint8_t prot; IPSEC_ASSERT(sav != NULL, ("null SA")); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; IPSEC_ASSERT(espx != NULL, ("null encoding xform")); if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; rlen = m->m_pkthdr.len - skip; /* Raw payload length. */ /* * RFC4303 2.4 Requires 4 byte alignment. */ blks = MAX(4, espx->blocksize); /* Cipher blocksize */ /* XXX clamp padding length a la KAME??? */ padding = ((blks - ((rlen + 2) % blks)) % blks) + 2; alen = xform_ah_authsize(esph); ESPSTAT_INC(esps_output); saidx = &sav->sah->saidx; /* Check for maximum packet size violations. 
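 *
 * The length checked against maxpacketsize below is the final on-wire
 * size: skip + hlen + rlen + padding + alen.  As a worked example of the
 * padding computed above (assuming AES-CBC, so blks = 16): for rlen = 53,
 * (rlen + 2) % blks = 7, hence padding = (16 - 7) % 16 + 2 = 11, and the
 * encrypted portion 53 + 11 = 64 is a multiple of the block size.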
*/ switch (saidx->dst.sa.sa_family) { #ifdef INET case AF_INET: maxpacketsize = IP_MAXPACKET; break; #endif /* INET */ #ifdef INET6 case AF_INET6: maxpacketsize = IPV6_MAXPACKET; break; #endif /* INET6 */ default: DPRINTF(("%s: unknown/unsupported protocol " "family %d, SA %s/%08lx\n", __func__, saidx->dst.sa.sa_family, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); ESPSTAT_INC(esps_nopf); error = EPFNOSUPPORT; goto bad; } /* DPRINTF(("%s: skip %d hlen %d rlen %d padding %d alen %d blksd %d\n", __func__, skip, hlen, rlen, padding, alen, blks)); */ if (skip + hlen + rlen + padding + alen > maxpacketsize) { DPRINTF(("%s: packet in SA %s/%08lx got too big " "(len %u, max len %u)\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi), skip + hlen + rlen + padding + alen, maxpacketsize)); ESPSTAT_INC(esps_toobig); error = EMSGSIZE; goto bad; } /* Update the counters. */ ESPSTAT_ADD(esps_obytes, m->m_pkthdr.len - skip); m = m_unshare(m, M_NOWAIT); if (m == NULL) { DPRINTF(("%s: cannot clone mbuf chain, SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); ESPSTAT_INC(esps_hdrops); error = ENOBUFS; goto bad; } /* Inject ESP header. */ mo = m_makespace(m, skip, hlen, &roff); if (mo == NULL) { DPRINTF(("%s: %u byte ESP hdr inject failed for SA %s/%08lx\n", __func__, hlen, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); ESPSTAT_INC(esps_hdrops); /* XXX diffs from openbsd */ error = ENOBUFS; goto bad; } /* Initialize ESP header. */ bcopy((caddr_t) &sav->spi, mtod(mo, caddr_t) + roff, sizeof(uint32_t)); SECASVAR_LOCK(sav); if (sav->replay) { uint32_t replay; #ifdef REGRESSION /* Emulate replay attack when ipsec_replay is TRUE. */ if (!V_ipsec_replay) #endif sav->replay->count++; replay = htonl(sav->replay->count); bcopy((caddr_t) &replay, mtod(mo, caddr_t) + roff + sizeof(uint32_t), sizeof(uint32_t)); } cryptoid = sav->tdb_cryptoid; if (SAV_ISCTRORGCM(sav)) cntr = sav->cntr++; SECASVAR_UNLOCK(sav); /* * Add padding -- better to do it ourselves than use the crypto engine, * although if/when we support compression, we'd have to do that. */ pad = (u_char *) m_pad(m, padding + alen); if (pad == NULL) { DPRINTF(("%s: m_pad failed for SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); m = NULL; /* NB: free'd by m_pad */ error = ENOBUFS; goto bad; } /* * Add padding: random, zero, or self-describing. * XXX catch unexpected setting */ switch (sav->flags & SADB_X_EXT_PMASK) { case SADB_X_EXT_PRAND: (void) read_random(pad, padding - 2); break; case SADB_X_EXT_PZERO: bzero(pad, padding - 2); break; case SADB_X_EXT_PSEQ: for (i = 0; i < padding - 2; i++) pad[i] = i+1; break; } /* Fix padding length and Next Protocol in padding itself. */ pad[padding - 2] = padding - 2; m_copydata(m, protoff, sizeof(u_int8_t), pad + padding - 1); /* Fix Next Protocol in IPv4/IPv6 header. */ prot = IPPROTO_ESP; m_copyback(m, protoff, sizeof(u_int8_t), (u_char *) &prot); /* Get crypto descriptors. */ crp = crypto_getreq(esph != NULL ? 2 : 1); if (crp == NULL) { DPRINTF(("%s: failed to acquire crypto descriptors\n", __func__)); ESPSTAT_INC(esps_crypto); error = ENOBUFS; goto bad; } /* IPsec-specific opaque crypto info. 
*/ xd = malloc(sizeof(struct xform_data), M_XDATA, M_NOWAIT | M_ZERO); if (xd == NULL) { crypto_freereq(crp); DPRINTF(("%s: failed to allocate xform_data\n", __func__)); ESPSTAT_INC(esps_crypto); error = ENOBUFS; goto bad; } crde = crp->crp_desc; crda = crde->crd_next; /* Encryption descriptor. */ crde->crd_skip = skip + hlen; crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen); crde->crd_flags = CRD_F_ENCRYPT; crde->crd_inject = skip + hlen - sav->ivlen; /* Encryption operation. */ crde->crd_alg = espx->type; if (SAV_ISCTRORGCM(sav)) { ivp = &crde->crd_iv[0]; /* GCM IV Format: RFC4106 4 */ /* CTR IV Format: RFC3686 4 */ /* Salt is last four bytes of key, RFC4106 8.1 */ /* Nonce is last four bytes of key, RFC3686 5.1 */ memcpy(ivp, sav->key_enc->key_data + _KEYLEN(sav->key_enc) - 4, 4); be64enc(&ivp[4], cntr); if (SAV_ISCTR(sav)) { /* Initial block counter is 1, RFC3686 4 */ /* XXXAE: should we use this only for first packet? */ be32enc(&ivp[sav->ivlen + 4], 1); } m_copyback(m, skip + hlen - sav->ivlen, sav->ivlen, &ivp[4]); crde->crd_flags |= CRD_F_IV_EXPLICIT|CRD_F_IV_PRESENT; } /* Callback parameters */ xd->sp = sp; xd->sav = sav; xd->idx = idx; xd->cryptoid = cryptoid; xd->vnet = curvnet; /* Crypto operation descriptor. */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; if (V_async_crypto) crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; crp->crp_buf = (caddr_t) m; crp->crp_callback = esp_output_cb; crp->crp_opaque = (caddr_t) xd; - crp->crp_sid = cryptoid; + crp->crp_session = cryptoid; if (esph) { /* Authentication descriptor. */ crda->crd_alg = esph->type; crda->crd_skip = skip; if (SAV_ISGCM(sav)) crda->crd_len = 8; /* RFC4106 5, SPI + SN */ else crda->crd_len = m->m_pkthdr.len - (skip + alen); crda->crd_inject = m->m_pkthdr.len - alen; } return crypto_dispatch(crp); bad: if (m) m_freem(m); key_freesav(&sav); key_freesp(&sp); return (error); } /* * ESP output callback from the crypto driver. */ static int esp_output_cb(struct cryptop *crp) { struct xform_data *xd; struct secpolicy *sp; struct secasvar *sav; struct mbuf *m; crypto_session_t cryptoid; u_int idx; int error; xd = (struct xform_data *) crp->crp_opaque; CURVNET_SET(xd->vnet); m = (struct mbuf *) crp->crp_buf; sp = xd->sp; sav = xd->sav; idx = xd->idx; cryptoid = xd->cryptoid; /* Check for crypto errors. */ if (crp->crp_etype) { if (crp->crp_etype == EAGAIN) { /* Reset the session ID */ - if (ipsec_updateid(sav, &crp->crp_sid, &cryptoid) != 0) + if (ipsec_updateid(sav, &crp->crp_session, &cryptoid) != 0) crypto_freesession(cryptoid); - xd->cryptoid = crp->crp_sid; + xd->cryptoid = crp->crp_session; CURVNET_RESTORE(); return (crypto_dispatch(crp)); } ESPSTAT_INC(esps_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; m_freem(m); goto bad; } /* Shouldn't happen... */ if (m == NULL) { ESPSTAT_INC(esps_crypto); DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } free(xd, M_XDATA); crypto_freereq(crp); ESPSTAT_INC(esps_hist[sav->alg_enc]); if (sav->tdb_authalgxform != NULL) AHSTAT_INC(ahs_hist[sav->alg_auth]); #ifdef REGRESSION /* Emulate man-in-the-middle attack when ipsec_integrity is TRUE. */ if (V_ipsec_integrity) { static unsigned char ipseczeroes[AH_HMAC_MAXHASHLEN]; const struct auth_hash *esph; /* * Corrupt HMAC if we want to test integrity verification of * the other side. 
*/ esph = sav->tdb_authalgxform; if (esph != NULL) { int alen; alen = xform_ah_authsize(esph); m_copyback(m, m->m_pkthdr.len - alen, alen, ipseczeroes); } } #endif /* NB: m is reclaimed by ipsec_process_done. */ error = ipsec_process_done(m, sp, sav, idx); CURVNET_RESTORE(); return (error); bad: CURVNET_RESTORE(); free(xd, M_XDATA); crypto_freereq(crp); key_freesav(&sav); key_freesp(&sp); return (error); } static struct xformsw esp_xformsw = { .xf_type = XF_ESP, .xf_name = "IPsec ESP", .xf_init = esp_init, .xf_zeroize = esp_zeroize, .xf_input = esp_input, .xf_output = esp_output, }; SYSINIT(esp_xform_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, xform_attach, &esp_xformsw); SYSUNINIT(esp_xform_uninit, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, xform_detach, &esp_xformsw); Index: head/sys/netipsec/xform_ipcomp.c =================================================================== --- head/sys/netipsec/xform_ipcomp.c (revision 336438) +++ head/sys/netipsec/xform_ipcomp.c (revision 336439) @@ -1,777 +1,776 @@ /* $FreeBSD$ */ /* $OpenBSD: ip_ipcomp.c,v 1.1 2001/07/05 12:08:52 jjbg Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2001 Jean-Jacques Bernard-Gundol (jj@wabbitt.org) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* IP payload compression protocol (IPComp), see RFC 2393 */ #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #include #include #endif #include #include #include #include #include #include #include VNET_DEFINE(int, ipcomp_enable) = 1; VNET_PCPUSTAT_DEFINE(struct ipcompstat, ipcompstat); VNET_PCPUSTAT_SYSINIT(ipcompstat); #ifdef VIMAGE VNET_PCPUSTAT_SYSUNINIT(ipcompstat); #endif /* VIMAGE */ SYSCTL_DECL(_net_inet_ipcomp); SYSCTL_INT(_net_inet_ipcomp, OID_AUTO, ipcomp_enable, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ipcomp_enable), 0, ""); SYSCTL_VNET_PCPUSTAT(_net_inet_ipcomp, IPSECCTL_STATS, stats, struct ipcompstat, ipcompstat, "IPCOMP statistics (struct ipcompstat, netipsec/ipcomp_var.h"); static int ipcomp_input_cb(struct cryptop *crp); static int ipcomp_output_cb(struct cryptop *crp); /* * RFC 3173 p 2.2. Non-Expansion Policy: * If the total size of a compressed payload and the IPComp header, as * defined in section 3, is not smaller than the size of the original * payload, the IP datagram MUST be sent in the original non-compressed * form. * * When we use IPComp in tunnel mode, for small packets we will receive * encapsulated IP-IP datagrams without any compression and without IPComp * header. */ static int ipcomp_encapcheck(union sockaddr_union *src, union sockaddr_union *dst) { struct secasvar *sav; sav = key_allocsa_tunnel(src, dst, IPPROTO_IPCOMP); if (sav == NULL) return (0); key_freesav(&sav); if (src->sa.sa_family == AF_INET) return (sizeof(struct in_addr) << 4); else return (sizeof(struct in6_addr) << 4); } static int ipcomp_nonexp_input(struct mbuf *m, int off, int proto, void *arg __unused) { int isr; switch (proto) { #ifdef INET case IPPROTO_IPV4: isr = NETISR_IP; break; #endif #ifdef INET6 case IPPROTO_IPV6: isr = NETISR_IPV6; break; #endif default: IPCOMPSTAT_INC(ipcomps_nopf); m_freem(m); return (IPPROTO_DONE); } m_adj(m, off); IPCOMPSTAT_ADD(ipcomps_ibytes, m->m_pkthdr.len); IPCOMPSTAT_INC(ipcomps_input); netisr_dispatch(isr, m); return (IPPROTO_DONE); } /* * ipcomp_init() is called when an CPI is being set up. */ static int ipcomp_init(struct secasvar *sav, struct xformsw *xsp) { const struct comp_algo *tcomp; struct cryptoini cric; /* NB: algorithm really comes in alg_enc and not alg_comp! 
*/ tcomp = comp_algorithm_lookup(sav->alg_enc); if (tcomp == NULL) { DPRINTF(("%s: unsupported compression algorithm %d\n", __func__, sav->alg_comp)); return EINVAL; } sav->alg_comp = sav->alg_enc; /* set for doing histogram */ sav->tdb_xform = xsp; sav->tdb_compalgxform = tcomp; /* Initialize crypto session */ bzero(&cric, sizeof (cric)); cric.cri_alg = sav->tdb_compalgxform->type; return crypto_newsession(&sav->tdb_cryptoid, &cric, V_crypto_support); } /* * ipcomp_zeroize() used when IPCA is deleted */ static int ipcomp_zeroize(struct secasvar *sav) { - int err; - err = crypto_freesession(sav->tdb_cryptoid); - sav->tdb_cryptoid = 0; - return err; + crypto_freesession(sav->tdb_cryptoid); + sav->tdb_cryptoid = NULL; + return 0; } /* * ipcomp_input() gets called to uncompress an input packet */ static int ipcomp_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff) { struct xform_data *xd; struct cryptodesc *crdc; struct cryptop *crp; struct ipcomp *ipcomp; caddr_t addr; int error, hlen = IPCOMP_HLENGTH; /* * Check that the next header of the IPComp is not IPComp again, before * doing any real work. Given it is not possible to do double * compression it means someone is playing tricks on us. */ error = ENOBUFS; if (m->m_len < skip + hlen && (m = m_pullup(m, skip + hlen)) == NULL) { IPCOMPSTAT_INC(ipcomps_hdrops); /*XXX*/ DPRINTF(("%s: m_pullup failed\n", __func__)); key_freesav(&sav); return (error); } addr = (caddr_t) mtod(m, struct ip *) + skip; ipcomp = (struct ipcomp *)addr; if (ipcomp->comp_nxt == IPPROTO_IPCOMP) { IPCOMPSTAT_INC(ipcomps_pdrops); /* XXX have our own stats? */ DPRINTF(("%s: recursive compression detected\n", __func__)); error = EINVAL; goto bad; } /* Get crypto descriptors */ crp = crypto_getreq(1); if (crp == NULL) { DPRINTF(("%s: no crypto descriptors\n", __func__)); IPCOMPSTAT_INC(ipcomps_crypto); goto bad; } /* Get IPsec-specific opaque pointer */ xd = malloc(sizeof(*xd), M_XDATA, M_NOWAIT | M_ZERO); if (xd == NULL) { DPRINTF(("%s: cannot allocate xform_data\n", __func__)); IPCOMPSTAT_INC(ipcomps_crypto); crypto_freereq(crp); goto bad; } crdc = crp->crp_desc; crdc->crd_skip = skip + hlen; crdc->crd_len = m->m_pkthdr.len - (skip + hlen); crdc->crd_inject = skip; /* Decompression operation */ crdc->crd_alg = sav->tdb_compalgxform->type; /* Crypto operation descriptor */ crp->crp_ilen = m->m_pkthdr.len - (skip + hlen); crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; crp->crp_buf = (caddr_t) m; crp->crp_callback = ipcomp_input_cb; crp->crp_opaque = (caddr_t) xd; /* These are passed as-is to the callback */ xd->sav = sav; xd->protoff = protoff; xd->skip = skip; xd->vnet = curvnet; SECASVAR_LOCK(sav); - crp->crp_sid = xd->cryptoid = sav->tdb_cryptoid; + crp->crp_session = xd->cryptoid = sav->tdb_cryptoid; SECASVAR_UNLOCK(sav); return crypto_dispatch(crp); bad: m_freem(m); key_freesav(&sav); return (error); } /* * IPComp input callback from the crypto driver. 
*/ static int ipcomp_input_cb(struct cryptop *crp) { IPSEC_DEBUG_DECLARE(char buf[IPSEC_ADDRSTRLEN]); struct xform_data *xd; struct mbuf *m; struct secasvar *sav; struct secasindex *saidx; caddr_t addr; crypto_session_t cryptoid; int hlen = IPCOMP_HLENGTH, error, clen; int skip, protoff; uint8_t nproto; m = (struct mbuf *) crp->crp_buf; xd = (struct xform_data *) crp->crp_opaque; CURVNET_SET(xd->vnet); sav = xd->sav; skip = xd->skip; protoff = xd->protoff; cryptoid = xd->cryptoid; saidx = &sav->sah->saidx; IPSEC_ASSERT(saidx->dst.sa.sa_family == AF_INET || saidx->dst.sa.sa_family == AF_INET6, ("unexpected protocol family %u", saidx->dst.sa.sa_family)); /* Check for crypto errors */ if (crp->crp_etype) { if (crp->crp_etype == EAGAIN) { /* Reset the session ID */ - if (ipsec_updateid(sav, &crp->crp_sid, &cryptoid) != 0) + if (ipsec_updateid(sav, &crp->crp_session, &cryptoid) != 0) crypto_freesession(cryptoid); - xd->cryptoid = crp->crp_sid; + xd->cryptoid = crp->crp_session; CURVNET_RESTORE(); return (crypto_dispatch(crp)); } IPCOMPSTAT_INC(ipcomps_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; goto bad; } /* Shouldn't happen... */ if (m == NULL) { IPCOMPSTAT_INC(ipcomps_crypto); DPRINTF(("%s: null mbuf returned from crypto\n", __func__)); error = EINVAL; goto bad; } IPCOMPSTAT_INC(ipcomps_hist[sav->alg_comp]); clen = crp->crp_olen; /* Length of data after processing */ /* Release the crypto descriptors */ free(xd, M_XDATA), xd = NULL; crypto_freereq(crp), crp = NULL; /* In case it's not done already, adjust the size of the mbuf chain */ m->m_pkthdr.len = clen + hlen + skip; if (m->m_len < skip + hlen && (m = m_pullup(m, skip + hlen)) == NULL) { IPCOMPSTAT_INC(ipcomps_hdrops); /*XXX*/ DPRINTF(("%s: m_pullup failed\n", __func__)); error = EINVAL; /*XXX*/ goto bad; } /* Keep the next protocol field */ addr = (caddr_t) mtod(m, struct ip *) + skip; nproto = ((struct ipcomp *) addr)->comp_nxt; /* Remove the IPCOMP header */ error = m_striphdr(m, skip, hlen); if (error) { IPCOMPSTAT_INC(ipcomps_hdrops); DPRINTF(("%s: bad mbuf chain, IPCA %s/%08lx\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); goto bad; } /* Restore the Next Protocol field */ m_copyback(m, protoff, sizeof (u_int8_t), (u_int8_t *) &nproto); switch (saidx->dst.sa.sa_family) { #ifdef INET6 case AF_INET6: error = ipsec6_common_input_cb(m, sav, skip, protoff); break; #endif #ifdef INET case AF_INET: error = ipsec4_common_input_cb(m, sav, skip, protoff); break; #endif default: panic("%s: Unexpected address family: %d saidx=%p", __func__, saidx->dst.sa.sa_family, saidx); } CURVNET_RESTORE(); return error; bad: CURVNET_RESTORE(); if (sav != NULL) key_freesav(&sav); if (m != NULL) m_freem(m); if (xd != NULL) free(xd, M_XDATA); if (crp != NULL) crypto_freereq(crp); return error; } /* * IPComp output routine, called by ipsec[46]_perform_request() */ static int ipcomp_output(struct mbuf *m, struct secpolicy *sp, struct secasvar *sav, u_int idx, int skip, int protoff) { IPSEC_DEBUG_DECLARE(char buf[IPSEC_ADDRSTRLEN]); const struct comp_algo *ipcompx; struct cryptodesc *crdc; struct cryptop *crp; struct xform_data *xd; int error, ralen, maxpacketsize; IPSEC_ASSERT(sav != NULL, ("null SA")); ipcompx = sav->tdb_compalgxform; IPSEC_ASSERT(ipcompx != NULL, ("null compression xform")); /* * Do not touch the packet in case our payload to compress * is lower than the minimal threshold of the compression * alogrithm. 
We will just send out the data uncompressed. * See RFC 3173, 2.2. Non-Expansion Policy. */ if (m->m_pkthdr.len <= ipcompx->minlen) { IPCOMPSTAT_INC(ipcomps_threshold); return ipsec_process_done(m, sp, sav, idx); } ralen = m->m_pkthdr.len - skip; /* Raw payload length before comp. */ IPCOMPSTAT_INC(ipcomps_output); /* Check for maximum packet size violations. */ switch (sav->sah->saidx.dst.sa.sa_family) { #ifdef INET case AF_INET: maxpacketsize = IP_MAXPACKET; break; #endif /* INET */ #ifdef INET6 case AF_INET6: maxpacketsize = IPV6_MAXPACKET; break; #endif /* INET6 */ default: IPCOMPSTAT_INC(ipcomps_nopf); DPRINTF(("%s: unknown/unsupported protocol family %d, " "IPCA %s/%08lx\n", __func__, sav->sah->saidx.dst.sa.sa_family, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); error = EPFNOSUPPORT; goto bad; } if (ralen + skip + IPCOMP_HLENGTH > maxpacketsize) { IPCOMPSTAT_INC(ipcomps_toobig); DPRINTF(("%s: packet in IPCA %s/%08lx got too big " "(len %u, max len %u)\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi), ralen + skip + IPCOMP_HLENGTH, maxpacketsize)); error = EMSGSIZE; goto bad; } /* Update the counters */ IPCOMPSTAT_ADD(ipcomps_obytes, m->m_pkthdr.len - skip); m = m_unshare(m, M_NOWAIT); if (m == NULL) { IPCOMPSTAT_INC(ipcomps_hdrops); DPRINTF(("%s: cannot clone mbuf chain, IPCA %s/%08lx\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); error = ENOBUFS; goto bad; } /* Ok now, we can pass to the crypto processing. */ /* Get crypto descriptors */ crp = crypto_getreq(1); if (crp == NULL) { IPCOMPSTAT_INC(ipcomps_crypto); DPRINTF(("%s: failed to acquire crypto descriptor\n",__func__)); error = ENOBUFS; goto bad; } crdc = crp->crp_desc; /* Compression descriptor */ crdc->crd_skip = skip; crdc->crd_len = ralen; crdc->crd_flags = CRD_F_COMP; crdc->crd_inject = skip; /* Compression operation */ crdc->crd_alg = ipcompx->type; /* IPsec-specific opaque crypto info */ xd = malloc(sizeof(struct xform_data), M_XDATA, M_NOWAIT | M_ZERO); if (xd == NULL) { IPCOMPSTAT_INC(ipcomps_crypto); DPRINTF(("%s: failed to allocate xform_data\n", __func__)); crypto_freereq(crp); error = ENOBUFS; goto bad; } xd->sp = sp; xd->sav = sav; xd->idx = idx; xd->skip = skip; xd->protoff = protoff; xd->vnet = curvnet; /* Crypto operation descriptor */ crp->crp_ilen = m->m_pkthdr.len; /* Total input length */ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC; crp->crp_buf = (caddr_t) m; crp->crp_callback = ipcomp_output_cb; crp->crp_opaque = (caddr_t) xd; SECASVAR_LOCK(sav); - crp->crp_sid = xd->cryptoid = sav->tdb_cryptoid; + crp->crp_session = xd->cryptoid = sav->tdb_cryptoid; SECASVAR_UNLOCK(sav); return crypto_dispatch(crp); bad: if (m) m_freem(m); key_freesav(&sav); key_freesp(&sp); return (error); } /* * IPComp output callback from the crypto driver. 
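 *
 * Per the RFC 3173 2.2 non-expansion policy, the callback below only
 * injects an IPComp header when compression actually shrank the payload
 * (crp_ilen - skip > crp_olen); for example, a 1400-byte payload that
 * deflates to 900 bytes is sent compressed, while one that "compresses"
 * to 1450 bytes goes out in its original form.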
*/ static int ipcomp_output_cb(struct cryptop *crp) { IPSEC_DEBUG_DECLARE(char buf[IPSEC_ADDRSTRLEN]); struct xform_data *xd; struct secpolicy *sp; struct secasvar *sav; struct mbuf *m; crypto_session_t cryptoid; u_int idx; int error, skip, protoff; m = (struct mbuf *) crp->crp_buf; xd = (struct xform_data *) crp->crp_opaque; CURVNET_SET(xd->vnet); idx = xd->idx; sp = xd->sp; sav = xd->sav; skip = xd->skip; protoff = xd->protoff; cryptoid = xd->cryptoid; /* Check for crypto errors */ if (crp->crp_etype) { if (crp->crp_etype == EAGAIN) { /* Reset the session ID */ - if (ipsec_updateid(sav, &crp->crp_sid, &cryptoid) != 0) + if (ipsec_updateid(sav, &crp->crp_session, &cryptoid) != 0) crypto_freesession(cryptoid); - xd->cryptoid = crp->crp_sid; + xd->cryptoid = crp->crp_session; CURVNET_RESTORE(); return (crypto_dispatch(crp)); } IPCOMPSTAT_INC(ipcomps_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; goto bad; } /* Shouldn't happen... */ if (m == NULL) { IPCOMPSTAT_INC(ipcomps_crypto); DPRINTF(("%s: bogus return buffer from crypto\n", __func__)); error = EINVAL; goto bad; } IPCOMPSTAT_INC(ipcomps_hist[sav->alg_comp]); if (crp->crp_ilen - skip > crp->crp_olen) { struct mbuf *mo; struct ipcomp *ipcomp; int roff; uint8_t prot; /* Compression helped, inject IPCOMP header. */ mo = m_makespace(m, skip, IPCOMP_HLENGTH, &roff); if (mo == NULL) { IPCOMPSTAT_INC(ipcomps_wrap); DPRINTF(("%s: IPCOMP header inject failed " "for IPCA %s/%08lx\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); error = ENOBUFS; goto bad; } ipcomp = (struct ipcomp *)(mtod(mo, caddr_t) + roff); /* Initialize the IPCOMP header */ /* XXX alignment always correct? */ switch (sav->sah->saidx.dst.sa.sa_family) { #ifdef INET case AF_INET: ipcomp->comp_nxt = mtod(m, struct ip *)->ip_p; break; #endif /* INET */ #ifdef INET6 case AF_INET6: ipcomp->comp_nxt = mtod(m, struct ip6_hdr *)->ip6_nxt; break; #endif } ipcomp->comp_flags = 0; ipcomp->comp_cpi = htons((u_int16_t) ntohl(sav->spi)); /* Fix Next Protocol in IPv4/IPv6 header */ prot = IPPROTO_IPCOMP; m_copyback(m, protoff, sizeof(u_int8_t), (u_char *)&prot); /* Adjust the length in the IP header */ switch (sav->sah->saidx.dst.sa.sa_family) { #ifdef INET case AF_INET: mtod(m, struct ip *)->ip_len = htons(m->m_pkthdr.len); break; #endif /* INET */ #ifdef INET6 case AF_INET6: mtod(m, struct ip6_hdr *)->ip6_plen = htons(m->m_pkthdr.len) - sizeof(struct ip6_hdr); break; #endif /* INET6 */ default: IPCOMPSTAT_INC(ipcomps_nopf); DPRINTF(("%s: unknown/unsupported protocol " "family %d, IPCA %s/%08lx\n", __func__, sav->sah->saidx.dst.sa.sa_family, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); error = EPFNOSUPPORT; goto bad; } } else { /* Compression was useless, we have lost time. */ IPCOMPSTAT_INC(ipcomps_uncompr); DPRINTF(("%s: compressions was useless %d - %d <= %d\n", __func__, crp->crp_ilen, skip, crp->crp_olen)); /* XXX remember state to not compress the next couple * of packets, RFC 3173, 2.2. Non-Expansion Policy */ } /* Release the crypto descriptor */ free(xd, M_XDATA); crypto_freereq(crp); /* NB: m is reclaimed by ipsec_process_done. 
*/ error = ipsec_process_done(m, sp, sav, idx); CURVNET_RESTORE(); return (error); bad: if (m) m_freem(m); CURVNET_RESTORE(); free(xd, M_XDATA); crypto_freereq(crp); key_freesav(&sav); key_freesp(&sp); return (error); } #ifdef INET static int ipcomp4_nonexp_encapcheck(const struct mbuf *m, int off, int proto, void *arg __unused) { union sockaddr_union src, dst; const struct ip *ip; if (V_ipcomp_enable == 0) return (0); if (proto != IPPROTO_IPV4 && proto != IPPROTO_IPV6) return (0); bzero(&src, sizeof(src)); bzero(&dst, sizeof(dst)); src.sa.sa_family = dst.sa.sa_family = AF_INET; src.sin.sin_len = dst.sin.sin_len = sizeof(struct sockaddr_in); ip = mtod(m, const struct ip *); src.sin.sin_addr = ip->ip_src; dst.sin.sin_addr = ip->ip_dst; return (ipcomp_encapcheck(&src, &dst)); } static const struct encaptab *ipe4_cookie = NULL; static const struct encap_config ipv4_encap_cfg = { .proto = -1, .min_length = sizeof(struct ip), .exact_match = sizeof(in_addr_t) << 4, .check = ipcomp4_nonexp_encapcheck, .input = ipcomp_nonexp_input }; #endif #ifdef INET6 static int ipcomp6_nonexp_encapcheck(const struct mbuf *m, int off, int proto, void *arg __unused) { union sockaddr_union src, dst; const struct ip6_hdr *ip6; if (V_ipcomp_enable == 0) return (0); if (proto != IPPROTO_IPV4 && proto != IPPROTO_IPV6) return (0); bzero(&src, sizeof(src)); bzero(&dst, sizeof(dst)); src.sa.sa_family = dst.sa.sa_family = AF_INET; src.sin6.sin6_len = dst.sin6.sin6_len = sizeof(struct sockaddr_in6); ip6 = mtod(m, const struct ip6_hdr *); src.sin6.sin6_addr = ip6->ip6_src; dst.sin6.sin6_addr = ip6->ip6_dst; if (IN6_IS_SCOPE_LINKLOCAL(&src.sin6.sin6_addr)) { /* XXX: sa6_recoverscope() */ src.sin6.sin6_scope_id = ntohs(src.sin6.sin6_addr.s6_addr16[1]); src.sin6.sin6_addr.s6_addr16[1] = 0; } if (IN6_IS_SCOPE_LINKLOCAL(&dst.sin6.sin6_addr)) { /* XXX: sa6_recoverscope() */ dst.sin6.sin6_scope_id = ntohs(dst.sin6.sin6_addr.s6_addr16[1]); dst.sin6.sin6_addr.s6_addr16[1] = 0; } return (ipcomp_encapcheck(&src, &dst)); } static const struct encaptab *ipe6_cookie = NULL; static const struct encap_config ipv6_encap_cfg = { .proto = -1, .min_length = sizeof(struct ip6_hdr), .exact_match = sizeof(struct in6_addr) << 4, .check = ipcomp6_nonexp_encapcheck, .input = ipcomp_nonexp_input }; #endif static struct xformsw ipcomp_xformsw = { .xf_type = XF_IPCOMP, .xf_name = "IPcomp", .xf_init = ipcomp_init, .xf_zeroize = ipcomp_zeroize, .xf_input = ipcomp_input, .xf_output = ipcomp_output, }; static void ipcomp_attach(void) { #ifdef INET ipe4_cookie = ip_encap_attach(&ipv4_encap_cfg, NULL, M_WAITOK); #endif #ifdef INET6 ipe6_cookie = ip6_encap_attach(&ipv6_encap_cfg, NULL, M_WAITOK); #endif xform_attach(&ipcomp_xformsw); } static void ipcomp_detach(void) { #ifdef INET ip_encap_detach(ipe4_cookie); #endif #ifdef INET6 ip6_encap_detach(ipe6_cookie); #endif xform_detach(&ipcomp_xformsw); } SYSINIT(ipcomp_xform_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, ipcomp_attach, NULL); SYSUNINIT(ipcomp_xform_uninit, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, ipcomp_detach, NULL); Index: head/sys/opencrypto/_cryptodev.h =================================================================== --- head/sys/opencrypto/_cryptodev.h (revision 336438) +++ head/sys/opencrypto/_cryptodev.h (revision 336439) @@ -1,8 +1,8 @@ /* * This trivial work is released to the public domain, or licensed under the * terms of the CC0, at your option. 
* $FreeBSD$ */ #pragma once -typedef __uint64_t crypto_session_t; +typedef struct crypto_session *crypto_session_t; Index: head/sys/opencrypto/crypto.c =================================================================== --- head/sys/opencrypto/crypto.c (revision 336438) +++ head/sys/opencrypto/crypto.c (revision 336439) @@ -1,1781 +1,1853 @@ /*- * Copyright (c) 2002-2006 Sam Leffler. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Cryptographic Subsystem. * * This code is derived from the Openbsd Cryptographic Framework (OCF) * that has the copyright shown below. Very little of the original * code remains. */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #define CRYPTO_TIMING /* enable timing support */ #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* XXX for M_XDATA */ #include #include #include "cryptodev_if.h" #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) #include #endif +struct crypto_session { + device_t parent; + void *softc; + uint32_t hid; + uint32_t capabilities; +}; + SDT_PROVIDER_DEFINE(opencrypto); /* * Crypto drivers register themselves by allocating a slot in the * crypto_drivers table with crypto_get_driverid() and then registering * each algorithm they support with crypto_register() and crypto_kregister(). 
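 *
 * crypto_get_driverid() now also takes the size of the driver's
 * per-session state, which the framework allocates and zeroes on the
 * driver's behalf for every new session.  A minimal sketch of the
 * registration path (the driver and its session structure are
 * hypothetical):
 *
 *     struct mydrv_session {
 *             int     algo;           /* whatever the driver needs */
 *     };
 *
 *     sc->sc_cid = crypto_get_driverid(dev,
 *         sizeof(struct mydrv_session), CRYPTOCAP_F_HARDWARE);
 *     if (sc->sc_cid < 0)
 *             return (ENXIO);
 *     crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);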
*/ static struct mtx crypto_drivers_mtx; /* lock on driver table */ #define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_ASSERT() mtx_assert(&crypto_drivers_mtx, MA_OWNED) /* * Crypto device/driver capabilities structure. * * Synchronization: * (d) - protected by CRYPTO_DRIVER_LOCK() * (q) - protected by CRYPTO_Q_LOCK() * Not tagged fields are read-only. */ struct cryptocap { device_t cc_dev; /* (d) device/driver */ u_int32_t cc_sessions; /* (d) # of sessions */ u_int32_t cc_koperations; /* (d) # os asym operations */ /* * Largest possible operator length (in bits) for each type of * encryption algorithm. XXX not used */ u_int16_t cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1]; u_int8_t cc_alg[CRYPTO_ALGORITHM_MAX + 1]; u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1]; int cc_flags; /* (d) flags */ #define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */ int cc_qblocked; /* (q) symmetric q blocked */ int cc_kqblocked; /* (q) asymmetric q blocked */ + size_t cc_session_size; }; static struct cryptocap *crypto_drivers = NULL; static int crypto_drivers_num = 0; /* * There are two queues for crypto requests; one for symmetric (e.g. * cipher) operations and one for asymmetric (e.g. MOD)operations. * A single mutex is used to lock access to both queues. We could * have one per-queue but having one simplifies handling of block/unblock * operations. */ static int crp_sleep = 0; static TAILQ_HEAD(cryptop_q ,cryptop) crp_q; /* request queues */ static TAILQ_HEAD(,cryptkop) crp_kq; static struct mtx crypto_q_mtx; #define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx) #define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx) /* * Taskqueue used to dispatch the crypto requests * that have the CRYPTO_F_ASYNC flag */ static struct taskqueue *crypto_tq; /* * Crypto seq numbers are operated on with modular arithmetic */ #define CRYPTO_SEQ_GT(a,b) ((int)((a)-(b)) > 0) struct crypto_ret_worker { struct mtx crypto_ret_mtx; TAILQ_HEAD(,cryptop) crp_ordered_ret_q; /* ordered callback queue for symetric jobs */ TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queue for symetric jobs */ TAILQ_HEAD(,cryptkop) crp_ret_kq; /* callback queue for asym jobs */ u_int32_t reorder_ops; /* total ordered sym jobs received */ u_int32_t reorder_cur_seq; /* current sym job dispatched */ struct proc *cryptoretproc; }; static struct crypto_ret_worker *crypto_ret_workers = NULL; #define CRYPTO_RETW(i) (&crypto_ret_workers[i]) #define CRYPTO_RETW_ID(w) ((w) - crypto_ret_workers) #define FOREACH_CRYPTO_RETW(w) \ for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w) #define CRYPTO_RETW_LOCK(w) mtx_lock(&w->crypto_ret_mtx) #define CRYPTO_RETW_UNLOCK(w) mtx_unlock(&w->crypto_ret_mtx) #define CRYPTO_RETW_EMPTY(w) \ (TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q)) static int crypto_workers_num = 0; SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN, &crypto_workers_num, 0, "Number of crypto workers used to dispatch crypto jobs"); static uma_zone_t cryptop_zone; static uma_zone_t cryptodesc_zone; +static uma_zone_t cryptoses_zone; int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */ SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW, &crypto_userasymcrypto, 0, "Enable/disable user-mode access to asymmetric crypto support"); int crypto_devallowsoft = 0; /* only use hardware crypto */ SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW, &crypto_devallowsoft, 0, 
"Enable/disable use of software crypto by /dev/crypto"); MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records"); static void crypto_proc(void); static struct proc *cryptoproc; static void crypto_ret_proc(struct crypto_ret_worker *ret_worker); static void crypto_destroy(void); static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint); static int crypto_kinvoke(struct cryptkop *krp, int flags); +static void crypto_remove(struct cryptocap *cap); static void crypto_task_invoke(void *ctx, int pending); static void crypto_batch_enqueue(struct cryptop *crp); static struct cryptostats cryptostats; SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats, cryptostats, "Crypto system statistics"); #ifdef CRYPTO_TIMING static int crypto_timing = 0; SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW, &crypto_timing, 0, "Enable/disable crypto timing support"); #endif /* Try to avoid directly exposing the key buffer as a symbol */ static struct keybuf *keybuf; static struct keybuf empty_keybuf = { .kb_nents = 0 }; /* Obtain the key buffer from boot metadata */ static void keybuf_init(void) { caddr_t kmdp; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); keybuf = (struct keybuf *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_KEYBUF); if (keybuf == NULL) keybuf = &empty_keybuf; } /* It'd be nice if we could store these in some kind of secure memory... */ struct keybuf * get_keybuf(void) { return (keybuf); } static int crypto_init(void) { struct crypto_ret_worker *ret_worker; int error; mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table", MTX_DEF|MTX_QUIET); TAILQ_INIT(&crp_q); TAILQ_INIT(&crp_kq); mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF); cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop), 0, 0, 0, 0, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc), 0, 0, 0, 0, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); - if (cryptodesc_zone == NULL || cryptop_zone == NULL) { + cryptoses_zone = uma_zcreate("crypto_session", + sizeof(struct crypto_session), NULL, NULL, NULL, NULL, + UMA_ALIGN_PTR, UMA_ZONE_ZINIT); + + if (cryptodesc_zone == NULL || cryptop_zone == NULL || + cryptoses_zone == NULL) { printf("crypto_init: cannot setup crypto zones\n"); error = ENOMEM; goto bad; } crypto_drivers_num = CRYPTO_DRIVERS_INITIAL; crypto_drivers = malloc(crypto_drivers_num * sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO); if (crypto_drivers == NULL) { printf("crypto_init: cannot setup crypto drivers\n"); error = ENOMEM; goto bad; } if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus) crypto_workers_num = mp_ncpus; crypto_tq = taskqueue_create("crypto", M_WAITOK|M_ZERO, taskqueue_thread_enqueue, &crypto_tq); if (crypto_tq == NULL) { printf("crypto init: cannot setup crypto taskqueue\n"); error = ENOMEM; goto bad; } taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN, "crypto"); error = kproc_create((void (*)(void *)) crypto_proc, NULL, &cryptoproc, 0, 0, "crypto"); if (error) { printf("crypto_init: cannot start crypto thread; error %d", error); goto bad; } crypto_ret_workers = malloc(crypto_workers_num * sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_NOWAIT|M_ZERO); if (crypto_ret_workers == NULL) { error = ENOMEM; printf("crypto_init: cannot allocate ret workers\n"); goto bad; } FOREACH_CRYPTO_RETW(ret_worker) { TAILQ_INIT(&ret_worker->crp_ordered_ret_q); 
TAILQ_INIT(&ret_worker->crp_ret_q); TAILQ_INIT(&ret_worker->crp_ret_kq); ret_worker->reorder_ops = 0; ret_worker->reorder_cur_seq = 0; mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF); error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker, &ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker)); if (error) { printf("crypto_init: cannot start cryptoret thread; error %d", error); goto bad; } } keybuf_init(); return 0; bad: crypto_destroy(); return error; } /* * Signal a crypto thread to terminate. We use the driver * table lock to synchronize the sleep/wakeups so that we * are sure the threads have terminated before we release * the data structures they use. See crypto_finis below * for the other half of this song-and-dance. */ static void crypto_terminate(struct proc **pp, void *q) { struct proc *p; mtx_assert(&crypto_drivers_mtx, MA_OWNED); p = *pp; *pp = NULL; if (p) { wakeup_one(q); PROC_LOCK(p); /* NB: insure we don't miss wakeup */ CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */ msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0); PROC_UNLOCK(p); CRYPTO_DRIVER_LOCK(); } } static void crypto_destroy(void) { struct crypto_ret_worker *ret_worker; /* * Terminate any crypto threads. */ if (crypto_tq != NULL) taskqueue_drain_all(crypto_tq); CRYPTO_DRIVER_LOCK(); crypto_terminate(&cryptoproc, &crp_q); FOREACH_CRYPTO_RETW(ret_worker) crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q); CRYPTO_DRIVER_UNLOCK(); /* XXX flush queues??? */ /* * Reclaim dynamically allocated resources. */ if (crypto_drivers != NULL) free(crypto_drivers, M_CRYPTO_DATA); + if (cryptoses_zone != NULL) + uma_zdestroy(cryptoses_zone); if (cryptodesc_zone != NULL) uma_zdestroy(cryptodesc_zone); if (cryptop_zone != NULL) uma_zdestroy(cryptop_zone); mtx_destroy(&crypto_q_mtx); FOREACH_CRYPTO_RETW(ret_worker) mtx_destroy(&ret_worker->crypto_ret_mtx); free(crypto_ret_workers, M_CRYPTO_DATA); if (crypto_tq != NULL) taskqueue_free(crypto_tq); mtx_destroy(&crypto_drivers_mtx); } +uint32_t +crypto_ses2hid(crypto_session_t crypto_session) +{ + return (crypto_session->hid); +} + +uint32_t +crypto_ses2caps(crypto_session_t crypto_session) +{ + return (crypto_session->capabilities); +} + +void * +crypto_get_driver_session(crypto_session_t crypto_session) +{ + return (crypto_session->softc); +} + static struct cryptocap * crypto_checkdriver(u_int32_t hid) { if (crypto_drivers == NULL) return NULL; return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]); } /* * Compare a driver's list of supported algorithms against another * list; return non-zero if all algorithms are supported. */ static int driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri) { const struct cryptoini *cr; /* See if all the algorithms are supported. */ for (cr = cri; cr; cr = cr->cri_next) if (cap->cc_alg[cr->cri_alg] == 0) return 0; return 1; } /* * Select a driver for a new session that supports the specified * algorithms and, optionally, is constrained according to the flags. * The algorithm we use here is pretty stupid; just use the * first driver that supports all the algorithms we need. If there * are multiple drivers we choose the driver with the fewest active * sessions. We prefer hardware-backed drivers to software ones. * * XXX We need more smarts here (in real life too, but that's * XXX another story altogether). 
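 *
 * From a consumer's point of view this selection policy is driven by the
 * crid argument of crypto_newsession(), which on success now hands back
 * an opaque crypto_session_t rather than a numeric SID.  A sketch
 * (algorithm and key chosen arbitrarily):
 *
 *     crypto_session_t cses;
 *     struct cryptoini cri;
 *     int error;
 *
 *     bzero(&cri, sizeof(cri));
 *     cri.cri_alg = CRYPTO_AES_CBC;
 *     cri.cri_klen = 128;                /* key length in bits */
 *     cri.cri_key = key;                 /* caller's key material */
 *     error = crypto_newsession(&cses, &cri,
 *         CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 *     ...
 *     crypto_freesession(cses);          /* void; a NULL handle is a no-op */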
*/ static struct cryptocap * crypto_select_driver(const struct cryptoini *cri, int flags) { struct cryptocap *cap, *best; int match, hid; CRYPTO_DRIVER_ASSERT(); /* * Look first for hardware crypto devices if permitted. */ if (flags & CRYPTOCAP_F_HARDWARE) match = CRYPTOCAP_F_HARDWARE; else match = CRYPTOCAP_F_SOFTWARE; best = NULL; again: for (hid = 0; hid < crypto_drivers_num; hid++) { cap = &crypto_drivers[hid]; /* * If it's not initialized, is in the process of * going away, or is not appropriate (hardware * or software based on match), then skip. */ if (cap->cc_dev == NULL || (cap->cc_flags & CRYPTOCAP_F_CLEANUP) || (cap->cc_flags & match) == 0) continue; /* verify all the algorithms are supported. */ if (driver_suitable(cap, cri)) { if (best == NULL || cap->cc_sessions < best->cc_sessions) best = cap; } } if (best == NULL && match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { /* sort of an Algol 68-style for loop */ match = CRYPTOCAP_F_SOFTWARE; goto again; } return best; } /* * Create a new session. The crid argument specifies a crypto * driver to use or constraints on a driver to select (hardware * only, software only, either). Whatever driver is selected * must be capable of the requested crypto algorithms. */ int -crypto_newsession(crypto_session_t *sid, struct cryptoini *cri, int crid) +crypto_newsession(crypto_session_t *cses, struct cryptoini *cri, int crid) { + crypto_session_t res; + void *softc_mem; struct cryptocap *cap; - u_int32_t hid, lid; + u_int32_t hid; + size_t softc_size; int err; +restart: + res = NULL; + softc_mem = NULL; + CRYPTO_DRIVER_LOCK(); if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { /* * Use specified driver; verify it is capable. */ cap = crypto_checkdriver(crid); if (cap != NULL && !driver_suitable(cap, cri)) cap = NULL; } else { /* * No requested driver; select based on crid flags. */ cap = crypto_select_driver(cri, crid); /* * if NULL then can't do everything in one session. * XXX Fix this. We need to inject a "virtual" session * XXX layer right about here. */ } - if (cap != NULL) { - /* Call the driver initialization routine. */ - hid = cap - crypto_drivers; - lid = hid; /* Pass the driver ID. */ - err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri); - if (err == 0) { - (*sid) = (cap->cc_flags & 0xff000000) - | (hid & 0x00ffffff); - (*sid) <<= 32; - (*sid) |= (lid & 0xffffffff); - cap->cc_sessions++; - } else - CRYPTDEB("dev newsession failed: %d", err); - } else { + if (cap == NULL) { CRYPTDEB("no driver"); err = EOPNOTSUPP; + goto out; } + cap->cc_sessions++; + softc_size = cap->cc_session_size; + hid = cap - crypto_drivers; + cap = NULL; CRYPTO_DRIVER_UNLOCK(); + + softc_mem = malloc(softc_size, M_CRYPTO_DATA, M_WAITOK | M_ZERO); + res = uma_zalloc(cryptoses_zone, M_WAITOK | M_ZERO); + res->softc = softc_mem; + + CRYPTO_DRIVER_LOCK(); + cap = crypto_checkdriver(hid); + if (cap != NULL && (cap->cc_flags & CRYPTOCAP_F_CLEANUP) != 0) { + cap->cc_sessions--; + crypto_remove(cap); + cap = NULL; + } + if (cap == NULL) { + free(softc_mem, M_CRYPTO_DATA); + uma_zfree(cryptoses_zone, res); + CRYPTO_DRIVER_UNLOCK(); + goto restart; + } + + /* Call the driver initialization routine. 
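 *
 * The driver's newsession method now receives the opaque handle itself
 * and fetches its preallocated, zeroed per-session memory with
 * crypto_get_driver_session(); it no longer hands a session id back to
 * the framework.  A sketch (driver names hypothetical):
 *
 *     static int
 *     mydrv_newsession(device_t dev, crypto_session_t cses,
 *         struct cryptoini *cri)
 *     {
 *             struct mydrv_session *ses;
 *
 *             ses = crypto_get_driver_session(cses);
 *             /* ... fill in *ses from cri ... */
 *             return (0);
 *     }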
*/ + err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, cri); + if (err != 0) { + CRYPTDEB("dev newsession failed: %d", err); + goto out; + } + + res->capabilities = cap->cc_flags & 0xff000000; + res->hid = hid; + *cses = res; + +out: + CRYPTO_DRIVER_UNLOCK(); + if (err != 0) { + free(softc_mem, M_CRYPTO_DATA); + if (res != NULL) + uma_zfree(cryptoses_zone, res); + } return err; } static void crypto_remove(struct cryptocap *cap) { mtx_assert(&crypto_drivers_mtx, MA_OWNED); if (cap->cc_sessions == 0 && cap->cc_koperations == 0) bzero(cap, sizeof(*cap)); } /* * Delete an existing session (or a reserved session on an unregistered * driver). */ -int -crypto_freesession(crypto_session_t sid) +void +crypto_freesession(crypto_session_t cses) { struct cryptocap *cap; + void *ses; + size_t ses_size; u_int32_t hid; - int err; + if (cses == NULL) + return; + CRYPTO_DRIVER_LOCK(); - if (crypto_drivers == NULL) { - err = EINVAL; - goto done; - } - - /* Determine two IDs. */ - hid = CRYPTO_SESID2HID(sid); - - if (hid >= crypto_drivers_num) { - err = ENOENT; - goto done; - } + hid = crypto_ses2hid(cses); + KASSERT(hid < crypto_drivers_num, + ("bogus crypto_session %p hid %u", cses, hid)); cap = &crypto_drivers[hid]; + ses = cses->softc; + ses_size = cap->cc_session_size; + if (cap->cc_sessions) cap->cc_sessions--; /* Call the driver cleanup routine, if available. */ - err = CRYPTODEV_FREESESSION(cap->cc_dev, sid); + CRYPTODEV_FREESESSION(cap->cc_dev, cses); + explicit_bzero(ses, ses_size); + free(ses, M_CRYPTO_DATA); + uma_zfree(cryptoses_zone, cses); + if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) crypto_remove(cap); -done: CRYPTO_DRIVER_UNLOCK(); - return err; } /* * Return an unused driver id. Used by drivers prior to registering * support for the algorithms they handle. */ int32_t -crypto_get_driverid(device_t dev, int flags) +crypto_get_driverid(device_t dev, size_t sessionsize, int flags) { struct cryptocap *newdrv; int i; if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { printf("%s: no flags specified when registering driver\n", device_get_nameunit(dev)); return -1; } CRYPTO_DRIVER_LOCK(); for (i = 0; i < crypto_drivers_num; i++) { if (crypto_drivers[i].cc_dev == NULL && (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) { break; } } /* Out of entries, allocate some more. */ if (i == crypto_drivers_num) { /* Be careful about wrap-around. */ if (2 * crypto_drivers_num <= crypto_drivers_num) { CRYPTO_DRIVER_UNLOCK(); printf("crypto: driver count wraparound!\n"); return -1; } newdrv = malloc(2 * crypto_drivers_num * sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO); if (newdrv == NULL) { CRYPTO_DRIVER_UNLOCK(); printf("crypto: no space to expand driver table!\n"); return -1; } bcopy(crypto_drivers, newdrv, crypto_drivers_num * sizeof(struct cryptocap)); crypto_drivers_num *= 2; free(crypto_drivers, M_CRYPTO_DATA); crypto_drivers = newdrv; } /* NB: state is zero'd on free */ crypto_drivers[i].cc_sessions = 1; /* Mark */ crypto_drivers[i].cc_dev = dev; crypto_drivers[i].cc_flags = flags; + crypto_drivers[i].cc_session_size = sessionsize; if (bootverbose) printf("crypto: assign %s driver id %u, flags 0x%x\n", device_get_nameunit(dev), i, flags); CRYPTO_DRIVER_UNLOCK(); return i; } /* * Lookup a driver by name. We match against the full device * name and unit, and against just the name. The latter gives * us a simple widlcarding by device name. On success return the * driver/hardware identifier; otherwise return -1. 
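 *
 * For example, a consumer that prefers a particular provider might do the
 * following (a sketch; the device name is hypothetical, and cses/cri are
 * set up as for any other session):
 *
 *     int crid;
 *
 *     crid = crypto_find_driver("aesni0");
 *     if (crid < 0)
 *             crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
 *     error = crypto_newsession(&cses, &cri, crid);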
*/ int crypto_find_driver(const char *match) { int i, len = strlen(match); CRYPTO_DRIVER_LOCK(); for (i = 0; i < crypto_drivers_num; i++) { device_t dev = crypto_drivers[i].cc_dev; if (dev == NULL || (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP)) continue; if (strncmp(match, device_get_nameunit(dev), len) == 0 || strncmp(match, device_get_name(dev), len) == 0) break; } CRYPTO_DRIVER_UNLOCK(); return i < crypto_drivers_num ? i : -1; } /* * Return the device_t for the specified driver or NULL * if the driver identifier is invalid. */ device_t crypto_find_device_byhid(int hid) { struct cryptocap *cap = crypto_checkdriver(hid); return cap != NULL ? cap->cc_dev : NULL; } /* * Return the device/driver capabilities. */ int crypto_getcaps(int hid) { struct cryptocap *cap = crypto_checkdriver(hid); return cap != NULL ? cap->cc_flags : 0; } /* * Register support for a key-related algorithm. This routine * is called once for each algorithm supported a driver. */ int crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags) { struct cryptocap *cap; int err; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL && (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) { /* * XXX Do some performance testing to determine placing. * XXX We probably need an auxiliary data structure that * XXX describes relative performances. */ cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; if (bootverbose) printf("crypto: %s registers key alg %u flags %u\n" , device_get_nameunit(cap->cc_dev) , kalg , flags ); err = 0; } else err = EINVAL; CRYPTO_DRIVER_UNLOCK(); return err; } /* * Register support for a non-key-related algorithm. This routine * is called once for each such algorithm supported by a driver. */ int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen, u_int32_t flags) { struct cryptocap *cap; int err; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); /* NB: algorithms are in the range [1..max] */ if (cap != NULL && (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) { /* * XXX Do some performance testing to determine placing. * XXX We probably need an auxiliary data structure that * XXX describes relative performances. */ cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; cap->cc_max_op_len[alg] = maxoplen; if (bootverbose) printf("crypto: %s registers alg %u flags %u maxoplen %u\n" , device_get_nameunit(cap->cc_dev) , alg , flags , maxoplen ); cap->cc_sessions = 0; /* Unmark */ err = 0; } else err = EINVAL; CRYPTO_DRIVER_UNLOCK(); return err; } static void driver_finis(struct cryptocap *cap) { u_int32_t ses, kops; CRYPTO_DRIVER_ASSERT(); ses = cap->cc_sessions; kops = cap->cc_koperations; bzero(cap, sizeof(*cap)); if (ses != 0 || kops != 0) { /* * If there are pending sessions, * just mark as invalid. */ cap->cc_flags |= CRYPTOCAP_F_CLEANUP; cap->cc_sessions = ses; cap->cc_koperations = kops; } } /* * Unregister a crypto driver. If there are pending sessions using it, * leave enough information around so that subsequent calls using those * sessions will correctly detect the driver has been unregistered and * reroute requests. */ int crypto_unregister(u_int32_t driverid, int alg) { struct cryptocap *cap; int i, err; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL && (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) && cap->cc_alg[alg] != 0) { cap->cc_alg[alg] = 0; cap->cc_max_op_len[alg] = 0; /* Was this the last algorithm ? 
*/ for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++) if (cap->cc_alg[i] != 0) break; if (i == CRYPTO_ALGORITHM_MAX + 1) driver_finis(cap); err = 0; } else err = EINVAL; CRYPTO_DRIVER_UNLOCK(); return err; } /* * Unregister all algorithms associated with a crypto driver. * If there are pending sessions using it, leave enough information * around so that subsequent calls using those sessions will * correctly detect the driver has been unregistered and reroute * requests. */ int crypto_unregister_all(u_int32_t driverid) { struct cryptocap *cap; int err; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL) { driver_finis(cap); err = 0; } else err = EINVAL; CRYPTO_DRIVER_UNLOCK(); return err; } /* * Clear blockage on a driver. The what parameter indicates whether * the driver is now ready for cryptop's and/or cryptokop's. */ int crypto_unblock(u_int32_t driverid, int what) { struct cryptocap *cap; int err; CRYPTO_Q_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL) { if (what & CRYPTO_SYMQ) cap->cc_qblocked = 0; if (what & CRYPTO_ASYMQ) cap->cc_kqblocked = 0; if (crp_sleep) wakeup_one(&crp_q); err = 0; } else err = EINVAL; CRYPTO_Q_UNLOCK(); return err; } /* * Add a crypto request to a queue, to be processed by the kernel thread. */ int crypto_dispatch(struct cryptop *crp) { struct cryptocap *cap; u_int32_t hid; int result; cryptostats.cs_ops++; #ifdef CRYPTO_TIMING if (crypto_timing) binuptime(&crp->crp_tstamp); #endif - crp->crp_retw_id = crp->crp_sid % crypto_workers_num; + crp->crp_retw_id = ((uintptr_t)crp->crp_session) % crypto_workers_num; if (CRYPTOP_ASYNC(crp)) { if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) { struct crypto_ret_worker *ret_worker; ret_worker = CRYPTO_RETW(crp->crp_retw_id); CRYPTO_RETW_LOCK(ret_worker); crp->crp_seq = ret_worker->reorder_ops++; CRYPTO_RETW_UNLOCK(ret_worker); } TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp); taskqueue_enqueue(crypto_tq, &crp->crp_task); return (0); } if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) { - hid = CRYPTO_SESID2HID(crp->crp_sid); + hid = crypto_ses2hid(crp->crp_session); /* * Caller marked the request to be processed * immediately; dispatch it directly to the * driver unless the driver is currently blocked. */ cap = crypto_checkdriver(hid); /* Driver cannot disappeared when there is an active session. */ KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__)); if (!cap->cc_qblocked) { result = crypto_invoke(cap, crp, 0); if (result != ERESTART) return (result); /* * The driver ran out of resources, put the request on * the queue. */ } } crypto_batch_enqueue(crp); return 0; } void crypto_batch_enqueue(struct cryptop *crp) { CRYPTO_Q_LOCK(); TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); if (crp_sleep) wakeup_one(&crp_q); CRYPTO_Q_UNLOCK(); } /* * Add an asymetric crypto request to a queue, * to be processed by the kernel thread. */ int crypto_kdispatch(struct cryptkop *krp) { int error; cryptostats.cs_kops++; error = crypto_kinvoke(krp, krp->krp_crid); if (error == ERESTART) { CRYPTO_Q_LOCK(); TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); if (crp_sleep) wakeup_one(&crp_q); CRYPTO_Q_UNLOCK(); error = 0; } return error; } /* * Verify a driver is suitable for the specified operation. */ static __inline int kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp) { return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0; } /* * Select a driver for an asym operation. The driver must * support the necessary algorithm. 
The caller can constrain * which device is selected with the flags parameter. The * algorithm we use here is pretty stupid; just use the first * driver that supports the algorithms we need. If there are * multiple suitable drivers we choose the driver with the * fewest active operations. We prefer hardware-backed * drivers to software ones when either may be used. */ static struct cryptocap * crypto_select_kdriver(const struct cryptkop *krp, int flags) { struct cryptocap *cap, *best; int match, hid; CRYPTO_DRIVER_ASSERT(); /* * Look first for hardware crypto devices if permitted. */ if (flags & CRYPTOCAP_F_HARDWARE) match = CRYPTOCAP_F_HARDWARE; else match = CRYPTOCAP_F_SOFTWARE; best = NULL; again: for (hid = 0; hid < crypto_drivers_num; hid++) { cap = &crypto_drivers[hid]; /* * If it's not initialized, is in the process of * going away, or is not appropriate (hardware * or software based on match), then skip. */ if (cap->cc_dev == NULL || (cap->cc_flags & CRYPTOCAP_F_CLEANUP) || (cap->cc_flags & match) == 0) continue; /* verify all the algorithms are supported. */ if (kdriver_suitable(cap, krp)) { if (best == NULL || cap->cc_koperations < best->cc_koperations) best = cap; } } if (best != NULL) return best; if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { /* sort of an Algol 68-style for loop */ match = CRYPTOCAP_F_SOFTWARE; goto again; } return best; } /* * Dispatch an asymmetric crypto request. */ static int crypto_kinvoke(struct cryptkop *krp, int crid) { struct cryptocap *cap = NULL; int error; KASSERT(krp != NULL, ("%s: krp == NULL", __func__)); KASSERT(krp->krp_callback != NULL, ("%s: krp->crp_callback == NULL", __func__)); CRYPTO_DRIVER_LOCK(); if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { cap = crypto_checkdriver(crid); if (cap != NULL) { /* * Driver present, it must support the necessary * algorithm and, if s/w drivers are excluded, * it must be registered as hardware-backed. */ if (!kdriver_suitable(cap, krp) || (!crypto_devallowsoft && (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0)) cap = NULL; } } else { /* * No requested driver; select based on crid flags. */ if (!crypto_devallowsoft) /* NB: disallow s/w drivers */ crid &= ~CRYPTOCAP_F_SOFTWARE; cap = crypto_select_kdriver(krp, crid); } if (cap != NULL && !cap->cc_kqblocked) { krp->krp_hid = cap - crypto_drivers; cap->cc_koperations++; CRYPTO_DRIVER_UNLOCK(); error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0); CRYPTO_DRIVER_LOCK(); if (error == ERESTART) { cap->cc_koperations--; CRYPTO_DRIVER_UNLOCK(); return (error); } } else { /* * NB: cap is !NULL if device is blocked; in * that case return ERESTART so the operation * is resubmitted if possible. */ error = (cap == NULL) ? 
ENODEV : ERESTART; } CRYPTO_DRIVER_UNLOCK(); if (error) { krp->krp_status = error; crypto_kdone(krp); } return 0; } #ifdef CRYPTO_TIMING static void crypto_tstat(struct cryptotstat *ts, struct bintime *bt) { struct bintime now, delta; struct timespec t; uint64_t u; binuptime(&now); u = now.frac; delta.frac = now.frac - bt->frac; delta.sec = now.sec - bt->sec; if (u < delta.frac) delta.sec--; bintime2timespec(&delta, &t); timespecadd(&ts->acc, &t); if (timespeccmp(&t, &ts->min, <)) ts->min = t; if (timespeccmp(&t, &ts->max, >)) ts->max = t; ts->count++; *bt = now; } #endif static void crypto_task_invoke(void *ctx, int pending) { struct cryptocap *cap; struct cryptop *crp; int hid, result; crp = (struct cryptop *)ctx; - hid = CRYPTO_SESID2HID(crp->crp_sid); + hid = crypto_ses2hid(crp->crp_session); cap = crypto_checkdriver(hid); result = crypto_invoke(cap, crp, 0); if (result == ERESTART) crypto_batch_enqueue(crp); } /* * Dispatch a crypto request to the appropriate crypto devices. */ static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint) { KASSERT(crp != NULL, ("%s: crp == NULL", __func__)); KASSERT(crp->crp_callback != NULL, ("%s: crp->crp_callback == NULL", __func__)); KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__)); #ifdef CRYPTO_TIMING if (crypto_timing) crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp); #endif if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { struct cryptodesc *crd; - crypto_session_t nid; + crypto_session_t nses; /* * Driver has unregistered; migrate the session and return * an error to the caller so they'll resubmit the op. * * XXX: What if there are more already queued requests for this * session? */ - crypto_freesession(crp->crp_sid); + crypto_freesession(crp->crp_session); for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next) crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI); /* XXX propagate flags from initial session? */ - if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), + if (crypto_newsession(&nses, &(crp->crp_desc->CRD_INI), CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0) - crp->crp_sid = nid; + crp->crp_session = nses; crp->crp_etype = EAGAIN; crypto_done(crp); return 0; } else { /* * Invoke the driver to process the request. */ return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint); } } /* * Release a set of crypto descriptors. */ void crypto_freereq(struct cryptop *crp) { struct cryptodesc *crd; if (crp == NULL) return; #ifdef DIAGNOSTIC { struct cryptop *crp2; struct crypto_ret_worker *ret_worker; CRYPTO_Q_LOCK(); TAILQ_FOREACH(crp2, &crp_q, crp_next) { KASSERT(crp2 != crp, ("Freeing cryptop from the crypto queue (%p).", crp)); } CRYPTO_Q_UNLOCK(); FOREACH_CRYPTO_RETW(ret_worker) { CRYPTO_RETW_LOCK(ret_worker); TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) { KASSERT(crp2 != crp, ("Freeing cryptop from the return queue (%p).", crp)); } CRYPTO_RETW_UNLOCK(ret_worker); } } #endif while ((crd = crp->crp_desc) != NULL) { crp->crp_desc = crd->crd_next; uma_zfree(cryptodesc_zone, crd); } uma_zfree(cryptop_zone, crp); } /* * Acquire a set of crypto descriptors. */ struct cryptop * crypto_getreq(int num) { struct cryptodesc *crd; struct cryptop *crp; crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO); if (crp != NULL) { while (num--) { crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO); if (crd == NULL) { crypto_freereq(crp); return NULL; } crd->crd_next = crp->crp_desc; crp->crp_desc = crd; } } return crp; } /* * Invoke the callback on behalf of the driver. 
*/ void crypto_done(struct cryptop *crp) { KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0, ("crypto_done: op already done, flags 0x%x", crp->crp_flags)); crp->crp_flags |= CRYPTO_F_DONE; if (crp->crp_etype != 0) cryptostats.cs_errs++; #ifdef CRYPTO_TIMING if (crypto_timing) crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp); #endif /* * CBIMM means unconditionally do the callback immediately; * CBIFSYNC means do the callback immediately only if the * operation was done synchronously. Both are used to avoid * doing extraneous context switches; the latter is mostly * used with the software crypto driver. */ if (!CRYPTOP_ASYNC_KEEPORDER(crp) && ((crp->crp_flags & CRYPTO_F_CBIMM) || ((crp->crp_flags & CRYPTO_F_CBIFSYNC) && - (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC)))) { + (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) { /* * Do the callback directly. This is ok when the * callback routine does very little (e.g. the * /dev/crypto callback method just does a wakeup). */ #ifdef CRYPTO_TIMING if (crypto_timing) { /* * NB: We must copy the timestamp before * doing the callback as the cryptop is * likely to be reclaimed. */ struct bintime t = crp->crp_tstamp; crypto_tstat(&cryptostats.cs_cb, &t); crp->crp_callback(crp); crypto_tstat(&cryptostats.cs_finis, &t); } else #endif crp->crp_callback(crp); } else { struct crypto_ret_worker *ret_worker; bool wake; ret_worker = CRYPTO_RETW(crp->crp_retw_id); wake = false; /* * Normal case; queue the callback for the thread. */ CRYPTO_RETW_LOCK(ret_worker); if (CRYPTOP_ASYNC_KEEPORDER(crp)) { struct cryptop *tmp; TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q, cryptop_q, crp_next) { if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) { TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q, tmp, crp, crp_next); break; } } if (tmp == NULL) { TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q, crp, crp_next); } if (crp->crp_seq == ret_worker->reorder_cur_seq) wake = true; } else { if (CRYPTO_RETW_EMPTY(ret_worker)) wake = true; TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next); } if (wake) wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ CRYPTO_RETW_UNLOCK(ret_worker); } } /* * Invoke the callback on behalf of the driver. */ void crypto_kdone(struct cryptkop *krp) { struct crypto_ret_worker *ret_worker; struct cryptocap *cap; if (krp->krp_status != 0) cryptostats.cs_kerrs++; CRYPTO_DRIVER_LOCK(); /* XXX: What if driver is loaded in the meantime? */ if (krp->krp_hid < crypto_drivers_num) { cap = &crypto_drivers[krp->krp_hid]; KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0")); cap->cc_koperations--; if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) crypto_remove(cap); } CRYPTO_DRIVER_UNLOCK(); ret_worker = CRYPTO_RETW(0); CRYPTO_RETW_LOCK(ret_worker); if (CRYPTO_RETW_EMPTY(ret_worker)) wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next); CRYPTO_RETW_UNLOCK(ret_worker); } int crypto_getfeat(int *featp) { int hid, kalg, feat = 0; CRYPTO_DRIVER_LOCK(); for (hid = 0; hid < crypto_drivers_num; hid++) { const struct cryptocap *cap = &crypto_drivers[hid]; if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && !crypto_devallowsoft) { continue; } for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED) feat |= 1 << kalg; } CRYPTO_DRIVER_UNLOCK(); *featp = feat; return (0); } /* * Terminate a thread at module unload. The process that * initiated this is waiting for us to signal that we're gone; * wake it up and exit. 
We use the driver table lock to insure * we don't do the wakeup before they're waiting. There is no * race here because the waiter sleeps on the proc lock for the * thread so it gets notified at the right time because of an * extra wakeup that's done in exit1(). */ static void crypto_finis(void *chan) { CRYPTO_DRIVER_LOCK(); wakeup_one(chan); CRYPTO_DRIVER_UNLOCK(); kproc_exit(0); } /* * Crypto thread, dispatches crypto requests. */ static void crypto_proc(void) { struct cryptop *crp, *submit; struct cryptkop *krp; struct cryptocap *cap; u_int32_t hid; int result, hint; #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) fpu_kern_thread(FPU_KERN_NORMAL); #endif CRYPTO_Q_LOCK(); for (;;) { /* * Find the first element in the queue that can be * processed and look-ahead to see if multiple ops * are ready for the same driver. */ submit = NULL; hint = 0; TAILQ_FOREACH(crp, &crp_q, crp_next) { - hid = CRYPTO_SESID2HID(crp->crp_sid); + hid = crypto_ses2hid(crp->crp_session); cap = crypto_checkdriver(hid); /* * Driver cannot disappeared when there is an active * session. */ KASSERT(cap != NULL, ("%s:%u Driver disappeared.", __func__, __LINE__)); if (cap == NULL || cap->cc_dev == NULL) { /* Op needs to be migrated, process it. */ if (submit == NULL) submit = crp; break; } if (!cap->cc_qblocked) { if (submit != NULL) { /* * We stop on finding another op, * regardless whether its for the same * driver or not. We could keep * searching the queue but it might be * better to just use a per-driver * queue instead. */ - if (CRYPTO_SESID2HID(submit->crp_sid) == hid) + if (crypto_ses2hid(submit->crp_session) == hid) hint = CRYPTO_HINT_MORE; break; } else { submit = crp; if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) break; /* keep scanning for more are q'd */ } } } if (submit != NULL) { TAILQ_REMOVE(&crp_q, submit, crp_next); - hid = CRYPTO_SESID2HID(submit->crp_sid); + hid = crypto_ses2hid(submit->crp_session); cap = crypto_checkdriver(hid); KASSERT(cap != NULL, ("%s:%u Driver disappeared.", __func__, __LINE__)); result = crypto_invoke(cap, submit, hint); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptop's and put * the request back in the queue. It would * best to put the request back where we got * it but that's hard so for now we put it * at the front. This should be ok; putting * it at the end does not work. */ /* XXX validate sid again? */ - crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1; + crypto_drivers[crypto_ses2hid(submit->crp_session)].cc_qblocked = 1; TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); cryptostats.cs_blocks++; } } /* As above, but for key ops */ TAILQ_FOREACH(krp, &crp_kq, krp_next) { cap = crypto_checkdriver(krp->krp_hid); if (cap == NULL || cap->cc_dev == NULL) { /* * Operation needs to be migrated, invalidate * the assigned device so it will reselect a * new one below. Propagate the original * crid selection flags if supplied. */ krp->krp_hid = krp->krp_crid & (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE); if (krp->krp_hid == 0) krp->krp_hid = CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE; break; } if (!cap->cc_kqblocked) break; } if (krp != NULL) { TAILQ_REMOVE(&crp_kq, krp, krp_next); result = crypto_kinvoke(krp, krp->krp_hid); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptkop's and put * the request back in the queue. It would * best to put the request back where we got * it but that's hard so for now we put it * at the front. 
This should be ok; putting * it at the end does not work. */ /* XXX validate sid again? */ crypto_drivers[krp->krp_hid].cc_kqblocked = 1; TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); cryptostats.cs_kblocks++; } } if (submit == NULL && krp == NULL) { /* * Nothing more to be processed. Sleep until we're * woken because there are more ops to process. * This happens either by submission or by a driver * becoming unblocked and notifying us through * crypto_unblock. Note that when we wakeup we * start processing each queue again from the * front. It's not clear that it's important to * preserve this ordering since ops may finish * out of order if dispatched to different devices * and some become blocked while others do not. */ crp_sleep = 1; msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); crp_sleep = 0; if (cryptoproc == NULL) break; cryptostats.cs_intrs++; } } CRYPTO_Q_UNLOCK(); crypto_finis(&crp_q); } /* * Crypto returns thread, does callbacks for processed crypto requests. * Callbacks are done here, rather than in the crypto drivers, because * callbacks typically are expensive and would slow interrupt handling. */ static void crypto_ret_proc(struct crypto_ret_worker *ret_worker) { struct cryptop *crpt; struct cryptkop *krpt; CRYPTO_RETW_LOCK(ret_worker); for (;;) { /* Harvest return q's for completed ops */ crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q); if (crpt != NULL) { if (crpt->crp_seq == ret_worker->reorder_cur_seq) { TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next); ret_worker->reorder_cur_seq++; } else { crpt = NULL; } } if (crpt == NULL) { crpt = TAILQ_FIRST(&ret_worker->crp_ret_q); if (crpt != NULL) TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next); } krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq); if (krpt != NULL) TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next); if (crpt != NULL || krpt != NULL) { CRYPTO_RETW_UNLOCK(ret_worker); /* * Run callbacks unlocked. */ if (crpt != NULL) { #ifdef CRYPTO_TIMING if (crypto_timing) { /* * NB: We must copy the timestamp before * doing the callback as the cryptop is * likely to be reclaimed. */ struct bintime t = crpt->crp_tstamp; crypto_tstat(&cryptostats.cs_cb, &t); crpt->crp_callback(crpt); crypto_tstat(&cryptostats.cs_finis, &t); } else #endif crpt->crp_callback(crpt); } if (krpt != NULL) krpt->krp_callback(krpt); CRYPTO_RETW_LOCK(ret_worker); } else { /* * Nothing more to be processed. Sleep until we're * woken because there are more returns to process. 
*/ msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT, "crypto_ret_wait", 0); if (ret_worker->cryptoretproc == NULL) break; cryptostats.cs_rets++; } } CRYPTO_RETW_UNLOCK(ret_worker); crypto_finis(&ret_worker->crp_ret_q); } #ifdef DDB static void db_show_drivers(void) { int hid; db_printf("%12s %4s %4s %8s %2s %2s\n" , "Device" , "Ses" , "Kops" , "Flags" , "QB" , "KB" ); for (hid = 0; hid < crypto_drivers_num; hid++) { const struct cryptocap *cap = &crypto_drivers[hid]; if (cap->cc_dev == NULL) continue; db_printf("%-12s %4u %4u %08x %2u %2u\n" , device_get_nameunit(cap->cc_dev) , cap->cc_sessions , cap->cc_koperations , cap->cc_flags , cap->cc_qblocked , cap->cc_kqblocked ); } } DB_SHOW_COMMAND(crypto, db_show_crypto) { struct cryptop *crp; struct crypto_ret_worker *ret_worker; db_show_drivers(); db_printf("\n"); db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", "Desc", "Callback"); TAILQ_FOREACH(crp, &crp_q, crp_next) { db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n" - , (int) CRYPTO_SESID2HID(crp->crp_sid) - , (int) CRYPTO_SESID2CAPS(crp->crp_sid) + , (int) crypto_ses2hid(crp->crp_session) + , (int) crypto_ses2caps(crp->crp_session) , crp->crp_ilen, crp->crp_olen , crp->crp_etype , crp->crp_flags , crp->crp_desc , crp->crp_callback ); } FOREACH_CRYPTO_RETW(ret_worker) { db_printf("\n%8s %4s %4s %4s %8s\n", "ret_worker", "HID", "Etype", "Flags", "Callback"); if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { db_printf("%8td %4u %4u %04x %8p\n" , CRYPTO_RETW_ID(ret_worker) - , (int) CRYPTO_SESID2HID(crp->crp_sid) + , (int) crypto_ses2hid(crp->crp_session) , crp->crp_etype , crp->crp_flags , crp->crp_callback ); } } } } DB_SHOW_COMMAND(kcrypto, db_show_kcrypto) { struct cryptkop *krp; struct crypto_ret_worker *ret_worker; db_show_drivers(); db_printf("\n"); db_printf("%4s %5s %4s %4s %8s %4s %8s\n", "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); TAILQ_FOREACH(krp, &crp_kq, krp_next) { db_printf("%4u %5u %4u %4u %08x %4u %8p\n" , krp->krp_op , krp->krp_status , krp->krp_iparams, krp->krp_oparams , krp->krp_crid, krp->krp_hid , krp->krp_callback ); } ret_worker = CRYPTO_RETW(0); if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { db_printf("%4s %5s %8s %4s %8s\n", "Op", "Status", "CRID", "HID", "Callback"); TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) { db_printf("%4u %5u %08x %4u %8p\n" , krp->krp_op , krp->krp_status , krp->krp_crid, krp->krp_hid , krp->krp_callback ); } } } #endif int crypto_modevent(module_t mod, int type, void *unused); /* * Initialization code, both for static and dynamic loading. * Note this is not invoked with the usual MODULE_DECLARE * mechanism but instead is listed as a dependency by the * cryptosoft driver. This guarantees proper ordering of * calls on module load/unload. 
*/ int crypto_modevent(module_t mod, int type, void *unused) { int error = EINVAL; switch (type) { case MOD_LOAD: error = crypto_init(); if (error == 0 && bootverbose) printf("crypto: \n"); break; case MOD_UNLOAD: /*XXX disallow if active sessions */ error = 0; crypto_destroy(); return 0; } return error; } MODULE_VERSION(crypto, 1); MODULE_DEPEND(crypto, zlib, 1, 1, 1); Index: head/sys/opencrypto/cryptodev.c =================================================================== --- head/sys/opencrypto/cryptodev.c (revision 336438) +++ head/sys/opencrypto/cryptodev.c (revision 336439) @@ -1,1487 +1,1485 @@ /* $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $ */ /*- * Copyright (c) 2001 Theo de Raadt * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DECLARE(opencrypto); SDT_PROBE_DEFINE1(opencrypto, dev, ioctl, error, "int"/*line number*/); #ifdef COMPAT_FREEBSD32 #include #include struct session_op32 { u_int32_t cipher; u_int32_t mac; u_int32_t keylen; u_int32_t key; int mackeylen; u_int32_t mackey; u_int32_t ses; }; struct session2_op32 { u_int32_t cipher; u_int32_t mac; u_int32_t keylen; u_int32_t key; int mackeylen; u_int32_t mackey; u_int32_t ses; int crid; int pad[4]; }; struct crypt_op32 { u_int32_t ses; u_int16_t op; u_int16_t flags; u_int len; u_int32_t src, dst; u_int32_t mac; u_int32_t iv; }; struct crparam32 { u_int32_t crp_p; u_int crp_nbits; }; struct crypt_kop32 { u_int crk_op; u_int crk_status; u_short crk_iparams; u_short crk_oparams; u_int crk_crid; struct crparam32 crk_param[CRK_MAXPARAM]; }; struct cryptotstat32 { struct timespec32 acc; struct timespec32 min; struct timespec32 max; u_int32_t count; }; struct cryptostats32 { u_int32_t cs_ops; u_int32_t cs_errs; u_int32_t cs_kops; u_int32_t cs_kerrs; u_int32_t cs_intrs; u_int32_t cs_rets; u_int32_t cs_blocks; u_int32_t cs_kblocks; struct cryptotstat32 cs_invoke; struct cryptotstat32 cs_done; struct cryptotstat32 cs_cb; struct cryptotstat32 cs_finis; }; #define CIOCGSESSION32 _IOWR('c', 101, struct session_op32) #define CIOCCRYPT32 _IOWR('c', 103, struct crypt_op32) #define CIOCKEY32 _IOWR('c', 104, struct crypt_kop32) #define CIOCGSESSION232 _IOWR('c', 106, struct session2_op32) #define CIOCKEY232 _IOWR('c', 107, struct crypt_kop32) static void session_op_from_32(const struct session_op32 *from, struct session_op *to) { CP(*from, *to, cipher); CP(*from, *to, mac); CP(*from, *to, keylen); PTRIN_CP(*from, *to, key); CP(*from, *to, mackeylen); PTRIN_CP(*from, *to, mackey); CP(*from, *to, ses); } static void session2_op_from_32(const struct session2_op32 *from, struct session2_op *to) { session_op_from_32((const struct session_op32 *)from, (struct session_op *)to); CP(*from, *to, crid); } static void session_op_to_32(const struct session_op *from, struct session_op32 *to) { CP(*from, *to, cipher); CP(*from, *to, mac); CP(*from, *to, keylen); PTROUT_CP(*from, *to, key); CP(*from, *to, mackeylen); PTROUT_CP(*from, *to, mackey); CP(*from, *to, ses); } static void session2_op_to_32(const struct session2_op *from, struct session2_op32 *to) { session_op_to_32((const struct session_op *)from, (struct session_op32 *)to); CP(*from, *to, crid); } static void crypt_op_from_32(const struct crypt_op32 *from, struct crypt_op *to) { CP(*from, *to, ses); CP(*from, *to, op); CP(*from, *to, flags); CP(*from, *to, len); PTRIN_CP(*from, *to, src); PTRIN_CP(*from, *to, dst); PTRIN_CP(*from, *to, mac); PTRIN_CP(*from, *to, iv); } static void crypt_op_to_32(const struct crypt_op *from, struct crypt_op32 *to) { CP(*from, *to, ses); CP(*from, *to, op); CP(*from, *to, flags); CP(*from, *to, len); PTROUT_CP(*from, *to, src); PTROUT_CP(*from, *to, dst); PTROUT_CP(*from, *to, mac); PTROUT_CP(*from, *to, iv); } static void crparam_from_32(const struct crparam32 *from, struct crparam *to) { PTRIN_CP(*from, *to, crp_p); CP(*from, *to, crp_nbits); } static void crparam_to_32(const struct crparam *from, struct crparam32 *to) { PTROUT_CP(*from, *to, crp_p); CP(*from, *to, crp_nbits); } static void crypt_kop_from_32(const struct crypt_kop32 *from, struct crypt_kop *to) { 
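/* Widen the 32-bit compat layout into the native crypt_kop; crparam_from_32() converts the user pointers with PTRIN_CP. */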
int i; CP(*from, *to, crk_op); CP(*from, *to, crk_status); CP(*from, *to, crk_iparams); CP(*from, *to, crk_oparams); CP(*from, *to, crk_crid); for (i = 0; i < CRK_MAXPARAM; i++) crparam_from_32(&from->crk_param[i], &to->crk_param[i]); } static void crypt_kop_to_32(const struct crypt_kop *from, struct crypt_kop32 *to) { int i; CP(*from, *to, crk_op); CP(*from, *to, crk_status); CP(*from, *to, crk_iparams); CP(*from, *to, crk_oparams); CP(*from, *to, crk_crid); for (i = 0; i < CRK_MAXPARAM; i++) crparam_to_32(&from->crk_param[i], &to->crk_param[i]); } #endif struct csession { TAILQ_ENTRY(csession) next; - crypto_session_t sid; + crypto_session_t cses; u_int32_t ses; struct mtx lock; /* for op submission */ u_int32_t cipher; struct enc_xform *txform; u_int32_t mac; struct auth_hash *thash; caddr_t key; int keylen; caddr_t mackey; int mackeylen; }; struct cryptop_data { struct csession *cse; struct iovec iovec[1]; struct uio uio; bool done; }; struct fcrypt { TAILQ_HEAD(csessionlist, csession) csessions; int sesn; }; static int cryptof_ioctl(struct file *, u_long, void *, struct ucred *, struct thread *); static int cryptof_stat(struct file *, struct stat *, struct ucred *, struct thread *); static int cryptof_close(struct file *, struct thread *); static int cryptof_fill_kinfo(struct file *, struct kinfo_file *, struct filedesc *); static struct fileops cryptofops = { .fo_read = invfo_rdwr, .fo_write = invfo_rdwr, .fo_truncate = invfo_truncate, .fo_ioctl = cryptof_ioctl, .fo_poll = invfo_poll, .fo_kqfilter = invfo_kqfilter, .fo_stat = cryptof_stat, .fo_close = cryptof_close, .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, .fo_sendfile = invfo_sendfile, .fo_fill_kinfo = cryptof_fill_kinfo, }; static struct csession *csefind(struct fcrypt *, u_int); static int csedelete(struct fcrypt *, struct csession *); static struct csession *cseadd(struct fcrypt *, struct csession *); static struct csession *csecreate(struct fcrypt *, crypto_session_t, caddr_t, u_int64_t, caddr_t, u_int64_t, u_int32_t, u_int32_t, struct enc_xform *, struct auth_hash *); -static int csefree(struct csession *); +static void csefree(struct csession *); static int cryptodev_op(struct csession *, struct crypt_op *, struct ucred *, struct thread *td); static int cryptodev_aead(struct csession *, struct crypt_aead *, struct ucred *, struct thread *); static int cryptodev_key(struct crypt_kop *); static int cryptodev_find(struct crypt_find_op *); /* * Check a crypto identifier to see if it requested * a software device/driver. This can be done either * by device name/class or through search constraints. 
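With software crypto administratively disallowed, a request naming both classes is narrowed to hardware only, and a software-only request is rejected with EINVAL.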
*/ static int checkforsoftware(int *cridp) { int crid; crid = *cridp; if (!crypto_devallowsoft) { if (crid & CRYPTOCAP_F_SOFTWARE) { if (crid & CRYPTOCAP_F_HARDWARE) { *cridp = CRYPTOCAP_F_HARDWARE; return 0; } return EINVAL; } if ((crid & CRYPTOCAP_F_HARDWARE) == 0 && (crypto_getcaps(crid) & CRYPTOCAP_F_HARDWARE) == 0) return EINVAL; } return 0; } /* ARGSUSED */ static int cryptof_ioctl( struct file *fp, u_long cmd, void *data, struct ucred *active_cred, struct thread *td) { #define SES2(p) ((struct session2_op *)p) struct cryptoini cria, crie; struct fcrypt *fcr = fp->f_data; struct csession *cse; struct session_op *sop; struct crypt_op *cop; struct crypt_aead *caead; struct enc_xform *txform = NULL; struct auth_hash *thash = NULL; struct crypt_kop *kop; - crypto_session_t sid; + crypto_session_t cses; u_int32_t ses; int error = 0, crid; #ifdef COMPAT_FREEBSD32 struct session2_op sopc; struct crypt_op copc; struct crypt_kop kopc; #endif switch (cmd) { case CIOCGSESSION: case CIOCGSESSION2: #ifdef COMPAT_FREEBSD32 case CIOCGSESSION32: case CIOCGSESSION232: if (cmd == CIOCGSESSION32) { session_op_from_32(data, (struct session_op *)&sopc); sop = (struct session_op *)&sopc; } else if (cmd == CIOCGSESSION232) { session2_op_from_32(data, &sopc); sop = (struct session_op *)&sopc; } else #endif sop = (struct session_op *)data; switch (sop->cipher) { case 0: break; case CRYPTO_DES_CBC: txform = &enc_xform_des; break; case CRYPTO_3DES_CBC: txform = &enc_xform_3des; break; case CRYPTO_BLF_CBC: txform = &enc_xform_blf; break; case CRYPTO_CAST_CBC: txform = &enc_xform_cast5; break; case CRYPTO_SKIPJACK_CBC: txform = &enc_xform_skipjack; break; case CRYPTO_AES_CBC: txform = &enc_xform_rijndael128; break; case CRYPTO_AES_XTS: txform = &enc_xform_aes_xts; break; case CRYPTO_NULL_CBC: txform = &enc_xform_null; break; case CRYPTO_ARC4: txform = &enc_xform_arc4; break; case CRYPTO_CAMELLIA_CBC: txform = &enc_xform_camellia; break; case CRYPTO_AES_ICM: txform = &enc_xform_aes_icm; break; case CRYPTO_AES_NIST_GCM_16: txform = &enc_xform_aes_nist_gcm; break; case CRYPTO_CHACHA20: txform = &enc_xform_chacha20; break; default: CRYPTDEB("invalid cipher"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } switch (sop->mac) { case 0: break; case CRYPTO_MD5_HMAC: thash = &auth_hash_hmac_md5; break; case CRYPTO_SHA1_HMAC: thash = &auth_hash_hmac_sha1; break; case CRYPTO_SHA2_224_HMAC: thash = &auth_hash_hmac_sha2_224; break; case CRYPTO_SHA2_256_HMAC: thash = &auth_hash_hmac_sha2_256; break; case CRYPTO_SHA2_384_HMAC: thash = &auth_hash_hmac_sha2_384; break; case CRYPTO_SHA2_512_HMAC: thash = &auth_hash_hmac_sha2_512; break; case CRYPTO_RIPEMD160_HMAC: thash = &auth_hash_hmac_ripemd_160; break; case CRYPTO_AES_128_NIST_GMAC: thash = &auth_hash_nist_gmac_aes_128; break; case CRYPTO_AES_192_NIST_GMAC: thash = &auth_hash_nist_gmac_aes_192; break; case CRYPTO_AES_256_NIST_GMAC: thash = &auth_hash_nist_gmac_aes_256; break; #ifdef notdef case CRYPTO_MD5: thash = &auth_hash_md5; break; #endif case CRYPTO_SHA1: thash = &auth_hash_sha1; break; case CRYPTO_SHA2_224: thash = &auth_hash_sha2_224; break; case CRYPTO_SHA2_256: thash = &auth_hash_sha2_256; break; case CRYPTO_SHA2_384: thash = &auth_hash_sha2_384; break; case CRYPTO_SHA2_512: thash = &auth_hash_sha2_512; break; case CRYPTO_NULL_HMAC: thash = &auth_hash_null; break; case CRYPTO_BLAKE2B: thash = &auth_hash_blake2b; break; case CRYPTO_BLAKE2S: thash = &auth_hash_blake2s; break; default: CRYPTDEB("invalid mac"); SDT_PROBE1(opencrypto, dev, ioctl, 
error, __LINE__); return (EINVAL); } bzero(&crie, sizeof(crie)); bzero(&cria, sizeof(cria)); if (txform) { crie.cri_alg = txform->type; crie.cri_klen = sop->keylen * 8; if (sop->keylen > txform->maxkey || sop->keylen < txform->minkey) { CRYPTDEB("invalid cipher parameters"); error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } crie.cri_key = malloc(crie.cri_klen / 8, M_XDATA, M_WAITOK); if ((error = copyin(sop->key, crie.cri_key, crie.cri_klen / 8))) { CRYPTDEB("invalid key"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if (thash) crie.cri_next = &cria; } if (thash) { cria.cri_alg = thash->type; cria.cri_klen = sop->mackeylen * 8; if (thash->keysize != 0 && sop->mackeylen > thash->keysize) { CRYPTDEB("invalid mac key length"); error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if (cria.cri_klen) { cria.cri_key = malloc(cria.cri_klen / 8, M_XDATA, M_WAITOK); if ((error = copyin(sop->mackey, cria.cri_key, cria.cri_klen / 8))) { CRYPTDEB("invalid mac key"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } } /* NB: CIOCGSESSION2 has the crid */ if (cmd == CIOCGSESSION2 #ifdef COMPAT_FREEBSD32 || cmd == CIOCGSESSION232 #endif ) { crid = SES2(sop)->crid; error = checkforsoftware(&crid); if (error) { CRYPTDEB("checkforsoftware"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } } else crid = CRYPTOCAP_F_HARDWARE; - error = crypto_newsession(&sid, (txform ? &crie : &cria), crid); + error = crypto_newsession(&cses, (txform ? &crie : &cria), crid); if (error) { CRYPTDEB("crypto_newsession"); SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } - cse = csecreate(fcr, sid, crie.cri_key, crie.cri_klen, + cse = csecreate(fcr, cses, crie.cri_key, crie.cri_klen, cria.cri_key, cria.cri_klen, sop->cipher, sop->mac, txform, thash); if (cse == NULL) { - crypto_freesession(sid); + crypto_freesession(cses); error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); CRYPTDEB("csecreate"); goto bail; } sop->ses = cse->ses; if (cmd == CIOCGSESSION2 #ifdef COMPAT_FREEBSD32 || cmd == CIOCGSESSION232 #endif ) { /* return hardware/driver id */ - SES2(sop)->crid = CRYPTO_SESID2HID(cse->sid); + SES2(sop)->crid = crypto_ses2hid(cse->cses); } bail: if (error) { if (crie.cri_key) free(crie.cri_key, M_XDATA); if (cria.cri_key) free(cria.cri_key, M_XDATA); } #ifdef COMPAT_FREEBSD32 else { if (cmd == CIOCGSESSION32) session_op_to_32(sop, data); else if (cmd == CIOCGSESSION232) session2_op_to_32((struct session2_op *)sop, data); } #endif break; case CIOCFSESSION: ses = *(u_int32_t *)data; cse = csefind(fcr, ses); if (cse == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } csedelete(fcr, cse); - error = csefree(cse); + csefree(cse); break; case CIOCCRYPT: #ifdef COMPAT_FREEBSD32 case CIOCCRYPT32: if (cmd == CIOCCRYPT32) { cop = &copc; crypt_op_from_32(data, cop); } else #endif cop = (struct crypt_op *)data; cse = csefind(fcr, cop->ses); if (cse == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } error = cryptodev_op(cse, cop, active_cred, td); #ifdef COMPAT_FREEBSD32 if (error == 0 && cmd == CIOCCRYPT32) crypt_op_to_32(cop, data); #endif break; case CIOCKEY: case CIOCKEY2: #ifdef COMPAT_FREEBSD32 case CIOCKEY32: case CIOCKEY232: #endif if (!crypto_userasymcrypto) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EPERM); /* XXX compat? 
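Note that CIOCASYMFEAT below reports an empty feature mask in this case instead, so well-behaved applications simply fall back to software.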
*/ } #ifdef COMPAT_FREEBSD32 if (cmd == CIOCKEY32 || cmd == CIOCKEY232) { kop = &kopc; crypt_kop_from_32(data, kop); } else #endif kop = (struct crypt_kop *)data; if (cmd == CIOCKEY #ifdef COMPAT_FREEBSD32 || cmd == CIOCKEY32 #endif ) { /* NB: crypto core enforces s/w driver use */ kop->crk_crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE; } mtx_lock(&Giant); error = cryptodev_key(kop); mtx_unlock(&Giant); #ifdef COMPAT_FREEBSD32 if (cmd == CIOCKEY32 || cmd == CIOCKEY232) crypt_kop_to_32(kop, data); #endif break; case CIOCASYMFEAT: if (!crypto_userasymcrypto) { /* * NB: if user asym crypto operations are * not permitted return "no algorithms" * so well-behaved applications will just * fallback to doing them in software. */ *(int *)data = 0; } else { error = crypto_getfeat((int *)data); if (error) SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); } break; case CIOCFINDDEV: error = cryptodev_find((struct crypt_find_op *)data); break; case CIOCCRYPTAEAD: caead = (struct crypt_aead *)data; cse = csefind(fcr, caead->ses); if (cse == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } error = cryptodev_aead(cse, caead, active_cred, td); break; default: error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); break; } return (error); #undef SES2 } static int cryptodev_cb(struct cryptop *); static struct cryptop_data * cod_alloc(struct csession *cse, size_t len, struct thread *td) { struct cryptop_data *cod; struct uio *uio; cod = malloc(sizeof(struct cryptop_data), M_XDATA, M_WAITOK | M_ZERO); cod->cse = cse; uio = &cod->uio; uio->uio_iov = cod->iovec; uio->uio_iovcnt = 1; uio->uio_resid = len; uio->uio_segflg = UIO_SYSSPACE; uio->uio_rw = UIO_WRITE; uio->uio_td = td; uio->uio_iov[0].iov_len = len; uio->uio_iov[0].iov_base = malloc(len, M_XDATA, M_WAITOK); return (cod); } static void cod_free(struct cryptop_data *cod) { free(cod->uio.uio_iov[0].iov_base, M_XDATA); free(cod, M_XDATA); } static int cryptodev_op( struct csession *cse, struct crypt_op *cop, struct ucred *active_cred, struct thread *td) { struct cryptop_data *cod = NULL; struct cryptop *crp = NULL; struct cryptodesc *crde = NULL, *crda = NULL; int error; if (cop->len > 256*1024-4) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (E2BIG); } if (cse->txform) { if (cop->len == 0 || (cop->len % cse->txform->blocksize) != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } } if (cse->thash) cod = cod_alloc(cse, cop->len + cse->thash->hashsize, td); else cod = cod_alloc(cse, cop->len, td); crp = crypto_getreq((cse->txform != NULL) + (cse->thash != NULL)); if (crp == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = ENOMEM; goto bail; } if (cse->thash && cse->txform) { if (cop->flags & COP_F_CIPHER_FIRST) { crde = crp->crp_desc; crda = crde->crd_next; } else { crda = crp->crp_desc; crde = crda->crd_next; } } else if (cse->thash) { crda = crp->crp_desc; } else if (cse->txform) { crde = crp->crp_desc; } else { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } if ((error = copyin(cop->src, cod->uio.uio_iov[0].iov_base, cop->len))) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if (crda) { crda->crd_skip = 0; crda->crd_len = cop->len; crda->crd_inject = cop->len; crda->crd_alg = cse->mac; crda->crd_key = cse->mackey; crda->crd_klen = cse->mackeylen * 8; } if (crde) { if (cop->op == COP_ENCRYPT) crde->crd_flags |= CRD_F_ENCRYPT; else crde->crd_flags &= ~CRD_F_ENCRYPT; crde->crd_len = cop->len; 
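/* The cipher descriptor initially covers the whole user buffer; if no explicit IV is supplied, the IV handling below bumps crd_skip and shrinks crd_len so the leading IV bytes are not encrypted. */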
crde->crd_inject = 0; crde->crd_alg = cse->cipher; crde->crd_key = cse->key; crde->crd_klen = cse->keylen * 8; } crp->crp_ilen = cop->len; crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM | (cop->flags & COP_F_BATCH); crp->crp_uio = &cod->uio; crp->crp_callback = cryptodev_cb; - crp->crp_sid = cse->sid; + crp->crp_session = cse->cses; crp->crp_opaque = cod; if (cop->iv) { if (crde == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } if ((error = copyin(cop->iv, crde->crd_iv, cse->txform->ivsize))) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT; crde->crd_skip = 0; } else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */ crde->crd_skip = 0; } else if (crde) { crde->crd_flags |= CRD_F_IV_PRESENT; crde->crd_skip = cse->txform->ivsize; crde->crd_len -= cse->txform->ivsize; } if (cop->mac && crda == NULL) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = EINVAL; goto bail; } again: /* * Let the dispatch run unlocked, then, interlock against the * callback before checking if the operation completed and going * to sleep. This insures drivers don't inherit our lock which * results in a lock order reversal between crypto_dispatch forced * entry and the crypto_done callback into us. */ error = crypto_dispatch(crp); if (error != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } mtx_lock(&cse->lock); while (!cod->done) mtx_sleep(cod, &cse->lock, PWAIT, "crydev", 0); mtx_unlock(&cse->lock); if (crp->crp_etype == EAGAIN) { crp->crp_etype = 0; crp->crp_flags &= ~CRYPTO_F_DONE; cod->done = false; goto again; } if (crp->crp_etype != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); error = crp->crp_etype; goto bail; } if (cop->dst && (error = copyout(cod->uio.uio_iov[0].iov_base, cop->dst, cop->len))) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if (cop->mac && (error = copyout((caddr_t)cod->uio.uio_iov[0].iov_base + cop->len, cop->mac, cse->thash->hashsize))) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } bail: if (crp) crypto_freereq(crp); if (cod) cod_free(cod); return (error); } static int cryptodev_aead( struct csession *cse, struct crypt_aead *caead, struct ucred *active_cred, struct thread *td) { struct cryptop_data *cod = NULL; struct cryptop *crp = NULL; struct cryptodesc *crde = NULL, *crda = NULL; int error; if (caead->len > 256*1024-4 || caead->aadlen > 256*1024-4) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (E2BIG); } if (cse->txform == NULL || cse->thash == NULL || caead->tag == NULL || (caead->len % cse->txform->blocksize) != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } cod = cod_alloc(cse, caead->aadlen + caead->len + cse->thash->hashsize, td); crp = crypto_getreq(2); if (crp == NULL) { error = ENOMEM; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if (caead->flags & COP_F_CIPHER_FIRST) { crde = crp->crp_desc; crda = crde->crd_next; } else { crda = crp->crp_desc; crde = crda->crd_next; } if ((error = copyin(caead->aad, cod->uio.uio_iov[0].iov_base, caead->aadlen))) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if ((error = copyin(caead->src, (char *)cod->uio.uio_iov[0].iov_base + caead->aadlen, caead->len))) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto 
bail; } /* * For GCM, crd_len covers only the AAD. For other ciphers * chained with an HMAC, crd_len covers both the AAD and the * cipher text. */ crda->crd_skip = 0; if (cse->cipher == CRYPTO_AES_NIST_GCM_16) crda->crd_len = caead->aadlen; else crda->crd_len = caead->aadlen + caead->len; crda->crd_inject = caead->aadlen + caead->len; crda->crd_alg = cse->mac; crda->crd_key = cse->mackey; crda->crd_klen = cse->mackeylen * 8; if (caead->op == COP_ENCRYPT) crde->crd_flags |= CRD_F_ENCRYPT; else crde->crd_flags &= ~CRD_F_ENCRYPT; crde->crd_skip = caead->aadlen; crde->crd_len = caead->len; crde->crd_inject = caead->aadlen; crde->crd_alg = cse->cipher; crde->crd_key = cse->key; crde->crd_klen = cse->keylen * 8; crp->crp_ilen = caead->aadlen + caead->len; crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM | (caead->flags & COP_F_BATCH); crp->crp_uio = &cod->uio; crp->crp_callback = cryptodev_cb; - crp->crp_sid = cse->sid; + crp->crp_session = cse->cses; crp->crp_opaque = cod; if (caead->iv) { if (caead->ivlen > sizeof(crde->crd_iv)) { error = EINVAL; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if ((error = copyin(caead->iv, crde->crd_iv, caead->ivlen))) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT; } else { crde->crd_flags |= CRD_F_IV_PRESENT; crde->crd_skip += cse->txform->ivsize; crde->crd_len -= cse->txform->ivsize; } if ((error = copyin(caead->tag, (caddr_t)cod->uio.uio_iov[0].iov_base + caead->len + caead->aadlen, cse->thash->hashsize))) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } again: /* * Let the dispatch run unlocked, then, interlock against the * callback before checking if the operation completed and going * to sleep. This insures drivers don't inherit our lock which * results in a lock order reversal between crypto_dispatch forced * entry and the crypto_done callback into us. */ error = crypto_dispatch(crp); if (error != 0) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } mtx_lock(&cse->lock); while (!cod->done) mtx_sleep(cod, &cse->lock, PWAIT, "crydev", 0); mtx_unlock(&cse->lock); if (crp->crp_etype == EAGAIN) { crp->crp_etype = 0; crp->crp_flags &= ~CRYPTO_F_DONE; cod->done = false; goto again; } if (crp->crp_etype != 0) { error = crp->crp_etype; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if (caead->dst && (error = copyout( (caddr_t)cod->uio.uio_iov[0].iov_base + caead->aadlen, caead->dst, caead->len))) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } if ((error = copyout((caddr_t)cod->uio.uio_iov[0].iov_base + caead->aadlen + caead->len, caead->tag, cse->thash->hashsize))) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto bail; } bail: crypto_freereq(crp); if (cod) cod_free(cod); return (error); } static int cryptodev_cb(struct cryptop *crp) { struct cryptop_data *cod = crp->crp_opaque; /* * Lock to ensure the wakeup() is not missed by the loops * waiting on cod->done in cryptodev_op() and * cryptodev_aead(). 
*/ mtx_lock(&cod->cse->lock); cod->done = true; mtx_unlock(&cod->cse->lock); wakeup(cod); return (0); } static int cryptodevkey_cb(void *op) { struct cryptkop *krp = (struct cryptkop *) op; wakeup_one(krp); return (0); } static int cryptodev_key(struct crypt_kop *kop) { struct cryptkop *krp = NULL; int error = EINVAL; int in, out, size, i; if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EFBIG); } in = kop->crk_iparams; out = kop->crk_oparams; switch (kop->crk_op) { case CRK_MOD_EXP: if (in == 3 && out == 1) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); case CRK_MOD_EXP_CRT: if (in == 6 && out == 1) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); case CRK_DSA_SIGN: if (in == 5 && out == 2) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); case CRK_DSA_VERIFY: if (in == 7 && out == 0) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); case CRK_DH_COMPUTE_KEY: if (in == 3 && out == 1) break; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); default: SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (EINVAL); } krp = (struct cryptkop *)malloc(sizeof *krp, M_XDATA, M_WAITOK|M_ZERO); if (!krp) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); return (ENOMEM); } krp->krp_op = kop->crk_op; krp->krp_status = kop->crk_status; krp->krp_iparams = kop->crk_iparams; krp->krp_oparams = kop->crk_oparams; krp->krp_crid = kop->crk_crid; krp->krp_status = 0; krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb; for (i = 0; i < CRK_MAXPARAM; i++) { if (kop->crk_param[i].crp_nbits > 65536) { /* Limit is the same as in OpenBSD */ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits; } for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) { size = (krp->krp_param[i].crp_nbits + 7) / 8; if (size == 0) continue; krp->krp_param[i].crp_p = malloc(size, M_XDATA, M_WAITOK); if (i >= krp->krp_iparams) continue; error = copyin(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p, size); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } } error = crypto_kdispatch(krp); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } error = tsleep(krp, PSOCK, "crydev", 0); if (error) { /* XXX can this happen? if so, how do we recover? 
*/ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } kop->crk_crid = krp->krp_crid; /* device that did the work */ if (krp->krp_status != 0) { error = krp->krp_status; SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) { size = (krp->krp_param[i].crp_nbits + 7) / 8; if (size == 0) continue; error = copyout(krp->krp_param[i].crp_p, kop->crk_param[i].crp_p, size); if (error) { SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__); goto fail; } } fail: if (krp) { kop->crk_status = krp->krp_status; for (i = 0; i < CRK_MAXPARAM; i++) { if (krp->krp_param[i].crp_p) free(krp->krp_param[i].crp_p, M_XDATA); } free(krp, M_XDATA); } return (error); } static int cryptodev_find(struct crypt_find_op *find) { device_t dev; size_t fnlen = sizeof find->name; if (find->crid != -1) { dev = crypto_find_device_byhid(find->crid); if (dev == NULL) return (ENOENT); strncpy(find->name, device_get_nameunit(dev), fnlen); find->name[fnlen - 1] = '\x0'; } else { find->name[fnlen - 1] = '\x0'; find->crid = crypto_find_driver(find->name); if (find->crid == -1) return (ENOENT); } return (0); } /* ARGSUSED */ static int cryptof_stat( struct file *fp, struct stat *sb, struct ucred *active_cred, struct thread *td) { return (EOPNOTSUPP); } /* ARGSUSED */ static int cryptof_close(struct file *fp, struct thread *td) { struct fcrypt *fcr = fp->f_data; struct csession *cse; while ((cse = TAILQ_FIRST(&fcr->csessions))) { TAILQ_REMOVE(&fcr->csessions, cse, next); - (void)csefree(cse); + csefree(cse); } free(fcr, M_XDATA); fp->f_data = NULL; return 0; } static int cryptof_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { kif->kf_type = KF_TYPE_CRYPTO; return (0); } static struct csession * csefind(struct fcrypt *fcr, u_int ses) { struct csession *cse; TAILQ_FOREACH(cse, &fcr->csessions, next) if (cse->ses == ses) return (cse); return (NULL); } static int csedelete(struct fcrypt *fcr, struct csession *cse_del) { struct csession *cse; TAILQ_FOREACH(cse, &fcr->csessions, next) { if (cse == cse_del) { TAILQ_REMOVE(&fcr->csessions, cse, next); return (1); } } return (0); } static struct csession * cseadd(struct fcrypt *fcr, struct csession *cse) { TAILQ_INSERT_TAIL(&fcr->csessions, cse, next); cse->ses = fcr->sesn++; return (cse); } struct csession * -csecreate(struct fcrypt *fcr, crypto_session_t sid, caddr_t key, u_int64_t keylen, +csecreate(struct fcrypt *fcr, crypto_session_t cses, caddr_t key, u_int64_t keylen, caddr_t mackey, u_int64_t mackeylen, u_int32_t cipher, u_int32_t mac, struct enc_xform *txform, struct auth_hash *thash) { struct csession *cse; cse = malloc(sizeof(struct csession), M_XDATA, M_NOWAIT | M_ZERO); if (cse == NULL) return NULL; mtx_init(&cse->lock, "cryptodev", "crypto session lock", MTX_DEF); cse->key = key; cse->keylen = keylen/8; cse->mackey = mackey; cse->mackeylen = mackeylen/8; - cse->sid = sid; + cse->cses = cses; cse->cipher = cipher; cse->mac = mac; cse->txform = txform; cse->thash = thash; cseadd(fcr, cse); return (cse); } -static int +static void csefree(struct csession *cse) { - int error; - error = crypto_freesession(cse->sid); + crypto_freesession(cse->cses); mtx_destroy(&cse->lock); if (cse->key) free(cse->key, M_XDATA); if (cse->mackey) free(cse->mackey, M_XDATA); free(cse, M_XDATA); - return (error); } static int cryptoopen(struct cdev *dev, int oflags, int devtype, struct thread *td) { return (0); } static int cryptoread(struct cdev *dev, struct uio *uio, int ioflag) 
{ return (EIO); } static int cryptowrite(struct cdev *dev, struct uio *uio, int ioflag) { return (EIO); } static int cryptoioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) { struct file *f; struct fcrypt *fcr; int fd, error; switch (cmd) { case CRIOGET: fcr = malloc(sizeof(struct fcrypt), M_XDATA, M_WAITOK); TAILQ_INIT(&fcr->csessions); fcr->sesn = 0; error = falloc(td, &f, &fd, 0); if (error) { free(fcr, M_XDATA); return (error); } /* falloc automatically provides an extra reference to 'f'. */ finit(f, FREAD | FWRITE, DTYPE_CRYPTO, fcr, &cryptofops); *(u_int32_t *)data = fd; fdrop(f, td); break; case CRIOFINDDEV: error = cryptodev_find((struct crypt_find_op *)data); break; case CRIOASYMFEAT: error = crypto_getfeat((int *)data); break; default: error = EINVAL; break; } return (error); } static struct cdevsw crypto_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = cryptoopen, .d_read = cryptoread, .d_write = cryptowrite, .d_ioctl = cryptoioctl, .d_name = "crypto", }; static struct cdev *crypto_dev; /* * Initialization code, both for static and dynamic loading. */ static int cryptodev_modevent(module_t mod, int type, void *unused) { switch (type) { case MOD_LOAD: if (bootverbose) printf("crypto: \n"); crypto_dev = make_dev(&crypto_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "crypto"); return 0; case MOD_UNLOAD: /*XXX disallow if active sessions */ destroy_dev(crypto_dev); return 0; } return EINVAL; } static moduledata_t cryptodev_mod = { "cryptodev", cryptodev_modevent, 0 }; MODULE_VERSION(cryptodev, 1); DECLARE_MODULE(cryptodev, cryptodev_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); MODULE_DEPEND(cryptodev, crypto, 1, 1, 1); MODULE_DEPEND(cryptodev, zlib, 1, 1, 1); Index: head/sys/opencrypto/cryptodev.h =================================================================== --- head/sys/opencrypto/cryptodev.h (revision 336438) +++ head/sys/opencrypto/cryptodev.h (revision 336439) @@ -1,565 +1,564 @@ /* $FreeBSD$ */ /* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000 Angelos D. Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. * * Copyright (c) 2001 Theo de Raadt * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. * */ #ifndef _CRYPTO_CRYPTO_H_ #define _CRYPTO_CRYPTO_H_ #include #include #ifdef _KERNEL #include #endif /* Some initial values */ #define CRYPTO_DRIVERS_INITIAL 4 #define CRYPTO_SW_SESSIONS 32 /* Hash values */ #define NULL_HASH_LEN 16 #define MD5_HASH_LEN 16 #define SHA1_HASH_LEN 20 #define RIPEMD160_HASH_LEN 20 #define SHA2_224_HASH_LEN 28 #define SHA2_256_HASH_LEN 32 #define SHA2_384_HASH_LEN 48 #define SHA2_512_HASH_LEN 64 #define MD5_KPDK_HASH_LEN 16 #define SHA1_KPDK_HASH_LEN 20 #define AES_GMAC_HASH_LEN 16 /* Maximum hash algorithm result length */ #define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */ #define MD5_BLOCK_LEN 64 #define SHA1_BLOCK_LEN 64 #define RIPEMD160_BLOCK_LEN 64 #define SHA2_224_BLOCK_LEN 64 #define SHA2_256_BLOCK_LEN 64 #define SHA2_384_BLOCK_LEN 128 #define SHA2_512_BLOCK_LEN 128 /* HMAC values */ #define NULL_HMAC_BLOCK_LEN 64 /* Maximum HMAC block length */ #define HMAC_MAX_BLOCK_LEN SHA2_512_BLOCK_LEN /* Keep this updated */ #define HMAC_IPAD_VAL 0x36 #define HMAC_OPAD_VAL 0x5C /* HMAC Key Length */ #define AES_128_GMAC_KEY_LEN 16 #define AES_192_GMAC_KEY_LEN 24 #define AES_256_GMAC_KEY_LEN 32 /* Encryption algorithm block sizes */ #define NULL_BLOCK_LEN 4 /* IPsec to maintain alignment */ #define DES_BLOCK_LEN 8 #define DES3_BLOCK_LEN 8 #define BLOWFISH_BLOCK_LEN 8 #define SKIPJACK_BLOCK_LEN 8 #define CAST128_BLOCK_LEN 8 #define RIJNDAEL128_BLOCK_LEN 16 #define AES_BLOCK_LEN 16 #define AES_ICM_BLOCK_LEN 1 #define ARC4_BLOCK_LEN 1 #define CAMELLIA_BLOCK_LEN 16 #define CHACHA20_NATIVE_BLOCK_LEN 64 #define EALG_MAX_BLOCK_LEN CHACHA20_NATIVE_BLOCK_LEN /* Keep this updated */ /* IV Lengths */ #define ARC4_IV_LEN 1 #define AES_GCM_IV_LEN 12 #define AES_XTS_IV_LEN 8 #define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */ /* Min and Max Encryption Key Sizes */ #define NULL_MIN_KEY 0 #define NULL_MAX_KEY 256 /* 2048 bits, max key */ #define DES_MIN_KEY 8 #define DES_MAX_KEY DES_MIN_KEY #define TRIPLE_DES_MIN_KEY 24 #define TRIPLE_DES_MAX_KEY TRIPLE_DES_MIN_KEY #define BLOWFISH_MIN_KEY 5 #define BLOWFISH_MAX_KEY 56 /* 448 bits, max key */ #define CAST_MIN_KEY 5 #define CAST_MAX_KEY 16 #define SKIPJACK_MIN_KEY 10 #define SKIPJACK_MAX_KEY SKIPJACK_MIN_KEY #define RIJNDAEL_MIN_KEY 16 #define 
RIJNDAEL_MAX_KEY 32 #define AES_MIN_KEY RIJNDAEL_MIN_KEY #define AES_MAX_KEY RIJNDAEL_MAX_KEY #define AES_XTS_MIN_KEY (2 * AES_MIN_KEY) #define AES_XTS_MAX_KEY (2 * AES_MAX_KEY) #define ARC4_MIN_KEY 1 #define ARC4_MAX_KEY 32 #define CAMELLIA_MIN_KEY 8 #define CAMELLIA_MAX_KEY 32 /* Maximum hash algorithm result length */ #define AALG_MAX_RESULT_LEN 64 /* Keep this updated */ #define CRYPTO_ALGORITHM_MIN 1 #define CRYPTO_DES_CBC 1 #define CRYPTO_3DES_CBC 2 #define CRYPTO_BLF_CBC 3 #define CRYPTO_CAST_CBC 4 #define CRYPTO_SKIPJACK_CBC 5 #define CRYPTO_MD5_HMAC 6 #define CRYPTO_SHA1_HMAC 7 #define CRYPTO_RIPEMD160_HMAC 8 #define CRYPTO_MD5_KPDK 9 #define CRYPTO_SHA1_KPDK 10 #define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */ #define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */ #define CRYPTO_ARC4 12 #define CRYPTO_MD5 13 #define CRYPTO_SHA1 14 #define CRYPTO_NULL_HMAC 15 #define CRYPTO_NULL_CBC 16 #define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */ #define CRYPTO_SHA2_256_HMAC 18 #define CRYPTO_SHA2_384_HMAC 19 #define CRYPTO_SHA2_512_HMAC 20 #define CRYPTO_CAMELLIA_CBC 21 #define CRYPTO_AES_XTS 22 #define CRYPTO_AES_ICM 23 /* commonly known as CTR mode */ #define CRYPTO_AES_NIST_GMAC 24 /* cipher side */ #define CRYPTO_AES_NIST_GCM_16 25 /* 16 byte ICV */ #define CRYPTO_AES_128_NIST_GMAC 26 /* auth side */ #define CRYPTO_AES_192_NIST_GMAC 27 /* auth side */ #define CRYPTO_AES_256_NIST_GMAC 28 /* auth side */ #define CRYPTO_BLAKE2B 29 /* Blake2b hash */ #define CRYPTO_BLAKE2S 30 /* Blake2s hash */ #define CRYPTO_CHACHA20 31 /* Chacha20 stream cipher */ #define CRYPTO_SHA2_224_HMAC 32 #define CRYPTO_RIPEMD160 33 #define CRYPTO_SHA2_224 34 #define CRYPTO_SHA2_256 35 #define CRYPTO_SHA2_384 36 #define CRYPTO_SHA2_512 37 #define CRYPTO_ALGORITHM_MAX 37 /* Keep updated - see below */ #define CRYPTO_ALGO_VALID(x) ((x) >= CRYPTO_ALGORITHM_MIN && \ (x) <= CRYPTO_ALGORITHM_MAX) /* Algorithm flags */ #define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */ #define CRYPTO_ALG_FLAG_RNG_ENABLE 0x02 /* Has HW RNG for DH/DSA */ #define CRYPTO_ALG_FLAG_DSA_SHA 0x04 /* Can do SHA on msg */ /* * Crypto driver/device flags. They can set in the crid * parameter when creating a session or submitting a key * op to affect the device/driver assigned. If neither * of these are specified then the crid is assumed to hold * the driver id of an existing (and suitable) device that * must be used to satisfy the request. */ #define CRYPTO_FLAG_HARDWARE 0x01000000 /* hardware accelerated */ #define CRYPTO_FLAG_SOFTWARE 0x02000000 /* software implementation */ /* NB: deprecated */ struct session_op { u_int32_t cipher; /* ie. CRYPTO_DES_CBC */ u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */ u_int32_t keylen; /* cipher key */ c_caddr_t key; int mackeylen; /* mac key */ c_caddr_t mackey; u_int32_t ses; /* returns: session # */ }; +/* + * session and crypt _op structs are used by userspace programs to interact + * with /dev/crypto. Confusingly, the internal kernel interface is named + * "cryptop" (no underscore). + */ struct session2_op { u_int32_t cipher; /* ie. CRYPTO_DES_CBC */ u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */ u_int32_t keylen; /* cipher key */ c_caddr_t key; int mackeylen; /* mac key */ c_caddr_t mackey; u_int32_t ses; /* returns: session # */ int crid; /* driver id + flags (rw) */ int pad[4]; /* for future expansion */ }; struct crypt_op { u_int32_t ses; u_int16_t op; /* i.e. 
COP_ENCRYPT */ #define COP_ENCRYPT 1 #define COP_DECRYPT 2 u_int16_t flags; #define COP_F_CIPHER_FIRST 0x0001 /* Cipher before MAC. */ #define COP_F_BATCH 0x0008 /* Batch op if possible */ u_int len; c_caddr_t src; /* become iov[] inside kernel */ caddr_t dst; caddr_t mac; /* must be big enough for chosen MAC */ c_caddr_t iv; }; /* op and flags the same as crypt_op */ struct crypt_aead { u_int32_t ses; u_int16_t op; /* i.e. COP_ENCRYPT */ u_int16_t flags; u_int len; u_int aadlen; u_int ivlen; c_caddr_t src; /* become iov[] inside kernel */ caddr_t dst; c_caddr_t aad; /* additional authenticated data */ caddr_t tag; /* must fit for chosen TAG length */ c_caddr_t iv; }; /* * Parameters for looking up a crypto driver/device by * device name or by id. The latter are returned for * created sessions (crid) and completed key operations. */ struct crypt_find_op { int crid; /* driver id + flags */ char name[32]; /* device/driver name */ }; /* bignum parameter, in packed bytes, ... */ struct crparam { caddr_t crp_p; u_int crp_nbits; }; #define CRK_MAXPARAM 8 struct crypt_kop { u_int crk_op; /* ie. CRK_MOD_EXP or other */ u_int crk_status; /* return status */ u_short crk_iparams; /* # of input parameters */ u_short crk_oparams; /* # of output parameters */ u_int crk_crid; /* NB: only used by CIOCKEY2 (rw) */ struct crparam crk_param[CRK_MAXPARAM]; }; #define CRK_ALGORITM_MIN 0 #define CRK_MOD_EXP 0 #define CRK_MOD_EXP_CRT 1 #define CRK_DSA_SIGN 2 #define CRK_DSA_VERIFY 3 #define CRK_DH_COMPUTE_KEY 4 #define CRK_ALGORITHM_MAX 4 /* Keep updated - see below */ #define CRF_MOD_EXP (1 << CRK_MOD_EXP) #define CRF_MOD_EXP_CRT (1 << CRK_MOD_EXP_CRT) #define CRF_DSA_SIGN (1 << CRK_DSA_SIGN) #define CRF_DSA_VERIFY (1 << CRK_DSA_VERIFY) #define CRF_DH_COMPUTE_KEY (1 << CRK_DH_COMPUTE_KEY) /* * done against open of /dev/crypto, to get a cloned descriptor. * Please use F_SETFD against the cloned descriptor. */ #define CRIOGET _IOWR('c', 100, u_int32_t) #define CRIOASYMFEAT CIOCASYMFEAT #define CRIOFINDDEV CIOCFINDDEV /* the following are done against the cloned descriptor */ #define CIOCGSESSION _IOWR('c', 101, struct session_op) #define CIOCFSESSION _IOW('c', 102, u_int32_t) #define CIOCCRYPT _IOWR('c', 103, struct crypt_op) #define CIOCKEY _IOWR('c', 104, struct crypt_kop) #define CIOCASYMFEAT _IOR('c', 105, u_int32_t) #define CIOCGSESSION2 _IOWR('c', 106, struct session2_op) #define CIOCKEY2 _IOWR('c', 107, struct crypt_kop) #define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op) #define CIOCCRYPTAEAD _IOWR('c', 109, struct crypt_aead) struct cryptotstat { struct timespec acc; /* total accumulated time */ struct timespec min; /* min time */ struct timespec max; /* max time */ u_int32_t count; /* number of observations */ }; struct cryptostats { u_int32_t cs_ops; /* symmetric crypto ops submitted */ u_int32_t cs_errs; /* symmetric crypto ops that failed */ u_int32_t cs_kops; /* asymetric/key ops submitted */ u_int32_t cs_kerrs; /* asymetric/key ops that failed */ u_int32_t cs_intrs; /* crypto swi thread activations */ u_int32_t cs_rets; /* crypto return thread activations */ u_int32_t cs_blocks; /* symmetric op driver block */ u_int32_t cs_kblocks; /* symmetric op driver block */ /* * When CRYPTO_TIMING is defined at compile time and the * sysctl debug.crypto is set to 1, the crypto system will * accumulate statistics about how long it takes to process * crypto requests at various points during processing. 
*/ struct cryptotstat cs_invoke; /* crypto_dipsatch -> crypto_invoke */ struct cryptotstat cs_done; /* crypto_invoke -> crypto_done */ struct cryptotstat cs_cb; /* crypto_done -> callback */ struct cryptotstat cs_finis; /* callback -> callback return */ }; #ifdef _KERNEL #if 0 #define CRYPTDEB(s, ...) do { \ printf("%s:%d: " s "\n", __FILE__, __LINE__, ## __VA_ARGS__); \ } while (0) #else #define CRYPTDEB(...) do { } while (0) #endif /* Standard initialization structure beginning */ struct cryptoini { int cri_alg; /* Algorithm to use */ int cri_klen; /* Key length, in bits */ int cri_mlen; /* Number of bytes we want from the entire hash. 0 means all. */ caddr_t cri_key; /* key to use */ u_int8_t cri_iv[EALG_MAX_BLOCK_LEN]; /* IV to use */ struct cryptoini *cri_next; }; /* Describe boundaries of a single crypto operation */ struct cryptodesc { int crd_skip; /* How many bytes to ignore from start */ int crd_len; /* How many bytes to process */ int crd_inject; /* Where to inject results, if applicable */ int crd_flags; #define CRD_F_ENCRYPT 0x01 /* Set when doing encryption */ #define CRD_F_IV_PRESENT 0x02 /* When encrypting, IV is already in place, so don't copy. */ #define CRD_F_IV_EXPLICIT 0x04 /* IV explicitly provided */ #define CRD_F_DSA_SHA_NEEDED 0x08 /* Compute SHA-1 of buffer for DSA */ #define CRD_F_COMP 0x0f /* Set when doing compression */ #define CRD_F_KEY_EXPLICIT 0x10 /* Key explicitly provided */ struct cryptoini CRD_INI; /* Initialization/context data */ #define crd_esn CRD_INI.cri_esn #define crd_iv CRD_INI.cri_iv #define crd_key CRD_INI.cri_key #define crd_alg CRD_INI.cri_alg #define crd_klen CRD_INI.cri_klen struct cryptodesc *crd_next; }; /* Structure describing complete operation */ struct cryptop { TAILQ_ENTRY(cryptop) crp_next; struct task crp_task; - crypto_session_t crp_sid; /* Session ID */ + crypto_session_t crp_session; /* Session */ int crp_ilen; /* Input data total length */ int crp_olen; /* Result total length */ int crp_etype; /* * Error type (zero means no error). * All error codes except EAGAIN * indicate possible data corruption (as in, * the data have been touched). On all - * errors, the crp_sid may have changed + * errors, the crp_session may have changed * (reset to a new one), so the caller * should always check and use the new * value on future requests. */ int crp_flags; #define CRYPTO_F_IMBUF 0x0001 /* Input/output are mbuf chains */ #define CRYPTO_F_IOV 0x0002 /* Input/output are uio */ #define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */ #define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */ #define CRYPTO_F_DONE 0x0020 /* Operation completed */ #define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */ #define CRYPTO_F_ASYNC 0x0080 /* Dispatch crypto jobs on several threads * if op is synchronous */ #define CRYPTO_F_ASYNC_KEEPORDER 0x0100 /* * Dispatch the crypto jobs in the same * order there are submitted. 
Applied only * if CRYPTO_F_ASYNC flags is set */ union { caddr_t crp_buf; /* Data to be processed */ struct mbuf *crp_mbuf; struct uio *crp_uio; }; void * crp_opaque; /* Opaque pointer, passed along */ struct cryptodesc *crp_desc; /* Linked list of processing descriptors */ int (*crp_callback)(struct cryptop *); /* Callback function */ struct bintime crp_tstamp; /* performance time stamp */ uint32_t crp_seq; /* used for ordered dispatch */ uint32_t crp_retw_id; /* * the return worker to be used, * used for ordered dispatch */ }; #define CRYPTOP_ASYNC(crp) \ (((crp)->crp_flags & CRYPTO_F_ASYNC) && \ - CRYPTO_SESID2CAPS((crp)->crp_sid) & CRYPTOCAP_F_SYNC) + crypto_ses2caps((crp)->crp_session) & CRYPTOCAP_F_SYNC) #define CRYPTOP_ASYNC_KEEPORDER(crp) \ (CRYPTOP_ASYNC(crp) && \ (crp)->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) #define CRYPTO_BUF_CONTIG 0x0 #define CRYPTO_BUF_IOV 0x1 #define CRYPTO_BUF_MBUF 0x2 #define CRYPTO_OP_DECRYPT 0x0 #define CRYPTO_OP_ENCRYPT 0x1 /* * Hints passed to process methods. */ #define CRYPTO_HINT_MORE 0x1 /* more ops coming shortly */ struct cryptkop { TAILQ_ENTRY(cryptkop) krp_next; u_int krp_op; /* ie. CRK_MOD_EXP or other */ u_int krp_status; /* return status */ u_short krp_iparams; /* # of input parameters */ u_short krp_oparams; /* # of output parameters */ u_int krp_crid; /* desired device, etc. */ u_int32_t krp_hid; struct crparam krp_param[CRK_MAXPARAM]; /* kvm */ int (*krp_callback)(struct cryptkop *); }; -/* - * Session ids are 64 bits. The lower 32 bits contain a "local id" which - * is a driver-private session identifier. The upper 32 bits contain a - * "hardware id" used by the core crypto code to identify the driver and - * a copy of the driver's capabilities that can be used by client code to - * optimize operation. 
- */ -#define CRYPTO_SESID2HID(_sid) (((_sid) >> 32) & 0x00ffffff) -#define CRYPTO_SESID2CAPS(_sid) (((_sid) >> 32) & 0xff000000) -#define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff) +uint32_t crypto_ses2hid(crypto_session_t crypto_session); +uint32_t crypto_ses2caps(crypto_session_t crypto_session); +void *crypto_get_driver_session(crypto_session_t crypto_session); MALLOC_DECLARE(M_CRYPTO_DATA); -extern int crypto_newsession(crypto_session_t *sid, struct cryptoini *cri, int hard); -extern int crypto_freesession(crypto_session_t sid); +extern int crypto_newsession(crypto_session_t *cses, struct cryptoini *cri, int hard); +extern void crypto_freesession(crypto_session_t cses); #define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE #define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE #define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */ -extern int32_t crypto_get_driverid(device_t dev, int flags); +extern int32_t crypto_get_driverid(device_t dev, size_t session_size, + int flags); extern int crypto_find_driver(const char *); extern device_t crypto_find_device_byhid(int hid); extern int crypto_getcaps(int hid); extern int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen, u_int32_t flags); extern int crypto_kregister(u_int32_t, int, u_int32_t); extern int crypto_unregister(u_int32_t driverid, int alg); extern int crypto_unregister_all(u_int32_t driverid); extern int crypto_dispatch(struct cryptop *crp); extern int crypto_kdispatch(struct cryptkop *); #define CRYPTO_SYMQ 0x1 #define CRYPTO_ASYMQ 0x2 extern int crypto_unblock(u_int32_t, int); extern void crypto_done(struct cryptop *crp); extern void crypto_kdone(struct cryptkop *); extern int crypto_getfeat(int *); extern void crypto_freereq(struct cryptop *crp); extern struct cryptop *crypto_getreq(int num); extern int crypto_usercrypto; /* userland may do crypto requests */ extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */ extern int crypto_devallowsoft; /* only use hardware crypto */ /* * Crypto-related utility routines used mainly by drivers. * * XXX these don't really belong here; but for now they're * kept apart from the rest of the system. */ struct uio; extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp); extern void cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp); extern int cuio_getptr(struct uio *uio, int loc, int *off); extern int cuio_apply(struct uio *uio, int off, int len, int (*f)(void *, void *, u_int), void *arg); struct mbuf; struct iovec; extern int crypto_mbuftoiov(struct mbuf *mbuf, struct iovec **iovptr, int *cnt, int *allocated); extern void crypto_copyback(int flags, caddr_t buf, int off, int size, c_caddr_t in); extern void crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out); extern int crypto_apply(int flags, caddr_t buf, int off, int len, int (*f)(void *, void *, u_int), void *arg); #endif /* _KERNEL */ #endif /* _CRYPTO_CRYPTO_H_ */ Index: head/sys/opencrypto/cryptodev_if.m =================================================================== --- head/sys/opencrypto/cryptodev_if.m (revision 336438) +++ head/sys/opencrypto/cryptodev_if.m (revision 336439) @@ -1,55 +1,73 @@ #- # Copyright (c) 2006, Sam Leffler # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. 
# 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # # $FreeBSD$ # #include #include INTERFACE cryptodev; +CODE { + static int null_freesession(device_t dev, + crypto_session_t crypto_session) + { + return 0; + } +}; + +/** + * Crypto driver method to initialize a new session object with the given + * initialization parameters (cryptoini). The driver's session memory object + * is already allocated and zeroed, like driver softcs. It is accessed with + * crypto_get_driver_session(). + */ METHOD int newsession { device_t dev; - uint32_t *sid; + crypto_session_t crypto_session; struct cryptoini *cri; }; -METHOD int freesession { +/** + * Optional crypto driver method to release any additional allocations. OCF + * owns session memory itself; it is zeroed before release. + */ +METHOD void freesession { device_t dev; - crypto_session_t sid; -}; + crypto_session_t crypto_session; +} DEFAULT null_freesession; METHOD int process { device_t dev; struct cryptop *op; int flags; }; METHOD int kprocess { device_t dev; struct cryptkop *op; int flags; }; Index: head/sys/opencrypto/cryptosoft.c =================================================================== --- head/sys/opencrypto/cryptosoft.c (revision 336438) +++ head/sys/opencrypto/cryptosoft.c (revision 336439) @@ -1,1397 +1,1299 @@ /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. Keromytis * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" static int32_t swcr_id; -static struct swcr_data **swcr_sessions = NULL; -static u_int32_t swcr_sesnum; -/* Protects swcr_sessions pointer, not data. */ -static struct rwlock swcr_sessions_lock; u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN]; u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN]; static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int); static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int); static int swcr_authenc(struct cryptop *crp); static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int); -static int swcr_freesession(device_t dev, u_int64_t tid); -static int swcr_freesession_locked(device_t dev, u_int64_t tid); +static void swcr_freesession(device_t dev, crypto_session_t cses); /* * Apply a symmetric encryption/decryption algorithm. */ static int swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf, int flags) { unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN]; unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN]; struct enc_xform *exf; int i, j, k, blks, ind, count, ivlen; struct uio *uio, uiolcl; struct iovec iovlcl[4]; struct iovec *iov; int iovcnt, iovalloc; int error; error = 0; exf = sw->sw_exf; blks = exf->blocksize; ivlen = exf->ivsize; /* Check for non-padded data */ if (crd->crd_len % blks) return EINVAL; if (crd->crd_alg == CRYPTO_AES_ICM && (crd->crd_flags & CRD_F_IV_EXPLICIT) == 0) return (EINVAL); /* Initialize the IV */ if (crd->crd_flags & CRD_F_ENCRYPT) { /* IV explicitly provided ? */ if (crd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(crd->crd_iv, iv, ivlen); else arc4rand(iv, ivlen, 0); /* Do we need to write the IV */ if (!(crd->crd_flags & CRD_F_IV_PRESENT)) crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv); } else { /* Decryption */ /* IV explicitly provided ? */ if (crd->crd_flags & CRD_F_IV_EXPLICIT) bcopy(crd->crd_iv, iv, ivlen); else { /* Get IV off buf */ crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv); } } if (crd->crd_flags & CRD_F_KEY_EXPLICIT) { int error; if (sw->sw_kschedule) exf->zerokey(&(sw->sw_kschedule)); error = exf->setkey(&sw->sw_kschedule, crd->crd_key, crd->crd_klen / 8); if (error) return (error); } iov = iovlcl; iovcnt = nitems(iovlcl); iovalloc = 0; uio = &uiolcl; if ((flags & CRYPTO_F_IMBUF) != 0) { error = crypto_mbuftoiov((struct mbuf *)buf, &iov, &iovcnt, &iovalloc); if (error) return (error); uio->uio_iov = iov; uio->uio_iovcnt = iovcnt; } else if ((flags & CRYPTO_F_IOV) != 0) uio = (struct uio *)buf; else { iov[0].iov_base = buf; iov[0].iov_len = crd->crd_skip + crd->crd_len; uio->uio_iov = iov; uio->uio_iovcnt = 1; } ivp = iv; if (exf->reinit) { /* * xforms that provide a reinit method perform all IV * handling themselves. */ exf->reinit(sw->sw_kschedule, iv); } count = crd->crd_skip; ind = cuio_getptr(uio, count, &k); if (ind == -1) { error = EINVAL; goto out; } i = crd->crd_len; while (i > 0) { /* * If there's insufficient data at the end of * an iovec, we have to do some copying. 
*/ if (uio->uio_iov[ind].iov_len < k + blks && uio->uio_iov[ind].iov_len != k) { cuio_copydata(uio, count, blks, blk); /* Actual encryption/decryption */ if (exf->reinit) { if (crd->crd_flags & CRD_F_ENCRYPT) { exf->encrypt(sw->sw_kschedule, blk); } else { exf->decrypt(sw->sw_kschedule, blk); } } else if (crd->crd_flags & CRD_F_ENCRYPT) { /* XOR with previous block */ for (j = 0; j < blks; j++) blk[j] ^= ivp[j]; exf->encrypt(sw->sw_kschedule, blk); /* * Keep encrypted block for XOR'ing * with next block */ bcopy(blk, iv, blks); ivp = iv; } else { /* decrypt */ /* * Keep encrypted block for XOR'ing * with next block */ nivp = (ivp == iv) ? iv2 : iv; bcopy(blk, nivp, blks); exf->decrypt(sw->sw_kschedule, blk); /* XOR with previous block */ for (j = 0; j < blks; j++) blk[j] ^= ivp[j]; ivp = nivp; } /* Copy back decrypted block */ cuio_copyback(uio, count, blks, blk); count += blks; /* Advance pointer */ ind = cuio_getptr(uio, count, &k); if (ind == -1) { error = EINVAL; goto out; } i -= blks; /* Could be done... */ if (i == 0) break; } while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) { uint8_t *idat; size_t nb, rem; nb = blks; rem = MIN((size_t)i, uio->uio_iov[ind].iov_len - (size_t)k); idat = (uint8_t *)uio->uio_iov[ind].iov_base + k; if (exf->reinit) { if ((crd->crd_flags & CRD_F_ENCRYPT) != 0 && exf->encrypt_multi == NULL) exf->encrypt(sw->sw_kschedule, idat); else if ((crd->crd_flags & CRD_F_ENCRYPT) != 0) { nb = rounddown(rem, blks); exf->encrypt_multi(sw->sw_kschedule, idat, nb); } else if (exf->decrypt_multi == NULL) exf->decrypt(sw->sw_kschedule, idat); else { nb = rounddown(rem, blks); exf->decrypt_multi(sw->sw_kschedule, idat, nb); } } else if (crd->crd_flags & CRD_F_ENCRYPT) { /* XOR with previous block/IV */ for (j = 0; j < blks; j++) idat[j] ^= ivp[j]; exf->encrypt(sw->sw_kschedule, idat); ivp = idat; } else { /* decrypt */ /* * Keep encrypted block to be used * in next block's processing. */ nivp = (ivp == iv) ? iv2 : iv; bcopy(idat, nivp, blks); exf->decrypt(sw->sw_kschedule, idat); /* XOR with previous block/IV */ for (j = 0; j < blks; j++) idat[j] ^= ivp[j]; ivp = nivp; } count += nb; k += nb; i -= nb; } /* * Advance to the next iov if the end of the current iov * is aligned with the end of a cipher block. * Note that the code is equivalent to calling: * ind = cuio_getptr(uio, count, &k); */ if (i > 0 && k == uio->uio_iov[ind].iov_len) { k = 0; ind++; if (ind >= uio->uio_iovcnt) { error = EINVAL; goto out; } } } out: if (iovalloc) free(iov, M_CRYPTO_DATA); return (error); } static void swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key, int klen) { int k; klen /= 8; switch (axf->type) { case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: for (k = 0; k < klen; k++) key[k] ^= HMAC_IPAD_VAL; axf->Init(sw->sw_ictx); axf->Update(sw->sw_ictx, key, klen); axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen); for (k = 0; k < klen; k++) key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL); axf->Init(sw->sw_octx); axf->Update(sw->sw_octx, key, klen); axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen); for (k = 0; k < klen; k++) key[k] ^= HMAC_OPAD_VAL; break; case CRYPTO_MD5_KPDK: case CRYPTO_SHA1_KPDK: { /* * We need a buffer that can hold an md5 and a sha1 result * just to throw it away. * What we do here is the initial part of: * ALGO( key, keyfill, .. 
) * adding the key to sw_ictx and abusing Final() to get the * "keyfill" padding. * In addition we abuse the sw_octx to save the key to have * it to be able to append it at the end in swcr_authcompute(). */ u_char buf[SHA1_RESULTLEN]; sw->sw_klen = klen; bcopy(key, sw->sw_octx, klen); axf->Init(sw->sw_ictx); axf->Update(sw->sw_ictx, key, klen); axf->Final(buf, sw->sw_ictx); break; } case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: axf->Setkey(sw->sw_ictx, key, klen); axf->Init(sw->sw_ictx); break; default: printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d " "doesn't use keys.\n", __func__, axf->type); } } /* * Compute keyed-hash authenticator. */ static int swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf, int flags) { unsigned char aalg[HASH_MAX_LEN]; struct auth_hash *axf; union authctx ctx; int err; if (sw->sw_ictx == 0) return EINVAL; axf = sw->sw_axf; if (crd->crd_flags & CRD_F_KEY_EXPLICIT) swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen); bcopy(sw->sw_ictx, &ctx, axf->ctxsize); err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len, (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx); if (err) return err; switch (sw->sw_alg) { case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA2_384: case CRYPTO_SHA2_512: axf->Final(aalg, &ctx); break; case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_RIPEMD160_HMAC: if (sw->sw_octx == NULL) return EINVAL; axf->Final(aalg, &ctx); bcopy(sw->sw_octx, &ctx, axf->ctxsize); axf->Update(&ctx, aalg, axf->hashsize); axf->Final(aalg, &ctx); break; case CRYPTO_MD5_KPDK: case CRYPTO_SHA1_KPDK: /* If we have no key saved, return error. */ if (sw->sw_octx == NULL) return EINVAL; /* * Add the trailing copy of the key (see comment in * swcr_authprepare()) after the data: * ALGO( .., key, algofill ) * and let Final() do the proper, natural "algofill" * padding. */ axf->Update(&ctx, sw->sw_octx, sw->sw_klen); axf->Final(aalg, &ctx); break; case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: case CRYPTO_NULL_HMAC: axf->Final(aalg, &ctx); break; } /* Inject the authentication data */ crypto_copyback(flags, buf, crd->crd_inject, sw->sw_mlen == 0 ? 
axf->hashsize : sw->sw_mlen, aalg); return 0; } CTASSERT(INT_MAX <= (1ll<<39) - 256); /* GCM: plain text < 2^39-256 */ CTASSERT(INT_MAX <= (uint64_t)-1); /* GCM: associated data <= 2^64-1 */ /* * Apply a combined encryption-authentication transformation */ static int swcr_authenc(struct cryptop *crp) { uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char aalg[AALG_MAX_RESULT_LEN]; u_char uaalg[AALG_MAX_RESULT_LEN]; u_char iv[EALG_MAX_BLOCK_LEN]; union authctx ctx; struct cryptodesc *crd, *crda = NULL, *crde = NULL; struct swcr_data *sw, *swa, *swe = NULL; struct auth_hash *axf = NULL; struct enc_xform *exf = NULL; caddr_t buf = (caddr_t)crp->crp_buf; uint32_t *blkp; int aadlen, blksz, i, ivlen, len, iskip, oskip, r; ivlen = blksz = iskip = oskip = 0; for (crd = crp->crp_desc; crd; crd = crd->crd_next) { - for (sw = swcr_sessions[crp->crp_sid & 0xffffffff]; + for (sw = crypto_get_driver_session(crp->crp_session); sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next) ; if (sw == NULL) return (EINVAL); switch (sw->sw_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_NIST_GMAC: swe = sw; crde = crd; exf = swe->sw_exf; ivlen = 12; break; case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: swa = sw; crda = crd; axf = swa->sw_axf; if (swa->sw_ictx == 0) return (EINVAL); bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = axf->blocksize; break; default: return (EINVAL); } } if (crde == NULL || crda == NULL) return (EINVAL); if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16 && (crde->crd_flags & CRD_F_IV_EXPLICIT) == 0) return (EINVAL); if (crde->crd_klen != crda->crd_klen) return (EINVAL); /* Initialize the IV */ if (crde->crd_flags & CRD_F_ENCRYPT) { /* IV explicitly provided ? */ if (crde->crd_flags & CRD_F_IV_EXPLICIT) bcopy(crde->crd_iv, iv, ivlen); else arc4rand(iv, ivlen, 0); /* Do we need to write the IV */ if (!(crde->crd_flags & CRD_F_IV_PRESENT)) crypto_copyback(crp->crp_flags, buf, crde->crd_inject, ivlen, iv); } else { /* Decryption */ /* IV explicitly provided ? 
*/ if (crde->crd_flags & CRD_F_IV_EXPLICIT) bcopy(crde->crd_iv, iv, ivlen); else { /* Get IV off buf */ crypto_copydata(crp->crp_flags, buf, crde->crd_inject, ivlen, iv); } } /* Supply MAC with IV */ if (axf->Reinit) axf->Reinit(&ctx, iv, ivlen); /* Supply MAC with AAD */ aadlen = crda->crd_len; for (i = iskip; i < crda->crd_len; i += blksz) { len = MIN(crda->crd_len - i, blksz - oskip); crypto_copydata(crp->crp_flags, buf, crda->crd_skip + i, len, blk + oskip); bzero(blk + len + oskip, blksz - len - oskip); axf->Update(&ctx, blk, blksz); oskip = 0; /* reset initial output offset */ } if (exf->reinit) exf->reinit(swe->sw_kschedule, iv); /* Do encryption/decryption with MAC */ for (i = 0; i < crde->crd_len; i += len) { if (exf->encrypt_multi != NULL) { len = rounddown(crde->crd_len - i, blksz); if (len == 0) len = blksz; else len = MIN(len, sizeof(blkbuf)); } else len = blksz; len = MIN(crde->crd_len - i, len); if (len < blksz) bzero(blk, blksz); crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len, blk); if (crde->crd_flags & CRD_F_ENCRYPT) { if (exf->encrypt_multi != NULL) exf->encrypt_multi(swe->sw_kschedule, blk, len); else exf->encrypt(swe->sw_kschedule, blk); axf->Update(&ctx, blk, len); crypto_copyback(crp->crp_flags, buf, crde->crd_skip + i, len, blk); } else { axf->Update(&ctx, blk, len); } } /* Do any required special finalization */ switch (crda->crd_alg) { case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: /* length block */ bzero(blk, blksz); blkp = (uint32_t *)blk + 1; *blkp = htobe32(aadlen * 8); blkp = (uint32_t *)blk + 3; *blkp = htobe32(crde->crd_len * 8); axf->Update(&ctx, blk, blksz); break; } /* Finalize MAC */ axf->Final(aalg, &ctx); /* Validate tag */ if (!(crde->crd_flags & CRD_F_ENCRYPT)) { crypto_copydata(crp->crp_flags, buf, crda->crd_inject, axf->hashsize, uaalg); r = timingsafe_bcmp(aalg, uaalg, axf->hashsize); if (r == 0) { /* tag matches, decrypt data */ for (i = 0; i < crde->crd_len; i += blksz) { len = MIN(crde->crd_len - i, blksz); if (len < blksz) bzero(blk, blksz); crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len, blk); exf->decrypt(swe->sw_kschedule, blk); crypto_copyback(crp->crp_flags, buf, crde->crd_skip + i, len, blk); } } else return (EBADMSG); } else { /* Inject the authentication data */ crypto_copyback(crp->crp_flags, buf, crda->crd_inject, axf->hashsize, aalg); } return (0); } /* * Apply a compression/decompression algorithm */ static int swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf, int flags) { u_int8_t *data, *out; struct comp_algo *cxf; int adj; u_int32_t result; cxf = sw->sw_cxf; /* We must handle the whole buffer of data in one time * then if there is not all the data in the mbuf, we must * copy in a buffer. */ data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT); if (data == NULL) return (EINVAL); crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data); if (crd->crd_flags & CRD_F_COMP) result = cxf->compress(data, crd->crd_len, &out); else result = cxf->decompress(data, crd->crd_len, &out); free(data, M_CRYPTO_DATA); if (result == 0) return EINVAL; /* Copy back the (de)compressed data. m_copyback is * extending the mbuf as necessary. 
*/ sw->sw_size = result; /* Check the compressed size when doing compression */ if (crd->crd_flags & CRD_F_COMP) { if (result >= crd->crd_len) { /* Compression was useless, we lost time */ free(out, M_CRYPTO_DATA); return 0; } } crypto_copyback(flags, buf, crd->crd_skip, result, out); if (result < crd->crd_len) { adj = result - crd->crd_len; if (flags & CRYPTO_F_IMBUF) { adj = result - crd->crd_len; m_adj((struct mbuf *)buf, adj); } else if (flags & CRYPTO_F_IOV) { struct uio *uio = (struct uio *)buf; int ind; adj = crd->crd_len - result; ind = uio->uio_iovcnt - 1; while (adj > 0 && ind >= 0) { if (adj < uio->uio_iov[ind].iov_len) { uio->uio_iov[ind].iov_len -= adj; break; } adj -= uio->uio_iov[ind].iov_len; uio->uio_iov[ind].iov_len = 0; ind--; uio->uio_iovcnt--; } } } free(out, M_CRYPTO_DATA); return 0; } /* * Generate a new software session. */ static int -swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri) +swcr_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri) { - struct swcr_data **swd; + struct swcr_data **swd, *ses; struct auth_hash *axf; struct enc_xform *txf; struct comp_algo *cxf; - u_int32_t i; int len; int error; - if (sid == NULL || cri == NULL) + if (cses == NULL || cri == NULL) return EINVAL; - rw_wlock(&swcr_sessions_lock); - if (swcr_sessions) { - for (i = 1; i < swcr_sesnum; i++) - if (swcr_sessions[i] == NULL) - break; - } else - i = 1; /* NB: to silence compiler warning */ + ses = crypto_get_driver_session(cses); + swd = &ses; - if (swcr_sessions == NULL || i == swcr_sesnum) { - if (swcr_sessions == NULL) { - i = 1; /* We leave swcr_sessions[0] empty */ - swcr_sesnum = CRYPTO_SW_SESSIONS; - } else - swcr_sesnum *= 2; - - swd = malloc(swcr_sesnum * sizeof(struct swcr_data *), - M_CRYPTO_DATA, M_NOWAIT|M_ZERO); - if (swd == NULL) { - /* Reset session number */ - if (swcr_sesnum == CRYPTO_SW_SESSIONS) - swcr_sesnum = 0; - else - swcr_sesnum /= 2; - rw_wunlock(&swcr_sessions_lock); - return ENOBUFS; - } - - /* Copy existing sessions */ - if (swcr_sessions != NULL) { - bcopy(swcr_sessions, swd, - (swcr_sesnum / 2) * sizeof(struct swcr_data *)); - free(swcr_sessions, M_CRYPTO_DATA); - } - - swcr_sessions = swd; - } - - rw_downgrade(&swcr_sessions_lock); - swd = &swcr_sessions[i]; - *sid = i; - while (cri) { - *swd = malloc(sizeof(struct swcr_data), - M_CRYPTO_DATA, M_NOWAIT|M_ZERO); + if (*swd == NULL) + *swd = malloc(sizeof(struct swcr_data), + M_CRYPTO_DATA, M_WAITOK | M_ZERO); if (*swd == NULL) { - swcr_freesession_locked(dev, i); - rw_runlock(&swcr_sessions_lock); + swcr_freesession(dev, cses); return ENOBUFS; } switch (cri->cri_alg) { case CRYPTO_DES_CBC: txf = &enc_xform_des; goto enccommon; case CRYPTO_3DES_CBC: txf = &enc_xform_3des; goto enccommon; case CRYPTO_BLF_CBC: txf = &enc_xform_blf; goto enccommon; case CRYPTO_CAST_CBC: txf = &enc_xform_cast5; goto enccommon; case CRYPTO_SKIPJACK_CBC: txf = &enc_xform_skipjack; goto enccommon; case CRYPTO_RIJNDAEL128_CBC: txf = &enc_xform_rijndael128; goto enccommon; case CRYPTO_AES_XTS: txf = &enc_xform_aes_xts; goto enccommon; case CRYPTO_AES_ICM: txf = &enc_xform_aes_icm; goto enccommon; case CRYPTO_AES_NIST_GCM_16: txf = &enc_xform_aes_nist_gcm; goto enccommon; case CRYPTO_AES_NIST_GMAC: txf = &enc_xform_aes_nist_gmac; (*swd)->sw_exf = txf; break; case CRYPTO_CAMELLIA_CBC: txf = &enc_xform_camellia; goto enccommon; case CRYPTO_NULL_CBC: txf = &enc_xform_null; goto enccommon; case CRYPTO_CHACHA20: txf = &enc_xform_chacha20; goto enccommon; enccommon: if (cri->cri_key != NULL) { error 
= txf->setkey(&((*swd)->sw_kschedule), cri->cri_key, cri->cri_klen / 8); if (error) { - swcr_freesession_locked(dev, i); - rw_runlock(&swcr_sessions_lock); + swcr_freesession(dev, cses); return error; } } (*swd)->sw_exf = txf; break; case CRYPTO_MD5_HMAC: axf = &auth_hash_hmac_md5; goto authcommon; case CRYPTO_SHA1_HMAC: axf = &auth_hash_hmac_sha1; goto authcommon; case CRYPTO_SHA2_224_HMAC: axf = &auth_hash_hmac_sha2_224; goto authcommon; case CRYPTO_SHA2_256_HMAC: axf = &auth_hash_hmac_sha2_256; goto authcommon; case CRYPTO_SHA2_384_HMAC: axf = &auth_hash_hmac_sha2_384; goto authcommon; case CRYPTO_SHA2_512_HMAC: axf = &auth_hash_hmac_sha2_512; goto authcommon; case CRYPTO_NULL_HMAC: axf = &auth_hash_null; goto authcommon; case CRYPTO_RIPEMD160_HMAC: axf = &auth_hash_hmac_ripemd_160; authcommon: (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if ((*swd)->sw_ictx == NULL) { - swcr_freesession_locked(dev, i); - rw_runlock(&swcr_sessions_lock); + swcr_freesession(dev, cses); return ENOBUFS; } (*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if ((*swd)->sw_octx == NULL) { - swcr_freesession_locked(dev, i); - rw_runlock(&swcr_sessions_lock); + swcr_freesession(dev, cses); return ENOBUFS; } if (cri->cri_key != NULL) { swcr_authprepare(axf, *swd, cri->cri_key, cri->cri_klen); } (*swd)->sw_mlen = cri->cri_mlen; (*swd)->sw_axf = axf; break; case CRYPTO_MD5_KPDK: axf = &auth_hash_key_md5; goto auth2common; case CRYPTO_SHA1_KPDK: axf = &auth_hash_key_sha1; auth2common: (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if ((*swd)->sw_ictx == NULL) { - swcr_freesession_locked(dev, i); - rw_runlock(&swcr_sessions_lock); + swcr_freesession(dev, cses); return ENOBUFS; } (*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA, M_NOWAIT); if ((*swd)->sw_octx == NULL) { - swcr_freesession_locked(dev, i); - rw_runlock(&swcr_sessions_lock); + swcr_freesession(dev, cses); return ENOBUFS; } /* Store the key so we can "append" it to the payload */ if (cri->cri_key != NULL) { swcr_authprepare(axf, *swd, cri->cri_key, cri->cri_klen); } (*swd)->sw_mlen = cri->cri_mlen; (*swd)->sw_axf = axf; break; #ifdef notdef case CRYPTO_MD5: axf = &auth_hash_md5; goto auth3common; #endif case CRYPTO_SHA1: axf = &auth_hash_sha1; goto auth3common; case CRYPTO_SHA2_224: axf = &auth_hash_sha2_224; goto auth3common; case CRYPTO_SHA2_256: axf = &auth_hash_sha2_256; goto auth3common; case CRYPTO_SHA2_384: axf = &auth_hash_sha2_384; goto auth3common; case CRYPTO_SHA2_512: axf = &auth_hash_sha2_512; auth3common: (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if ((*swd)->sw_ictx == NULL) { - swcr_freesession_locked(dev, i); - rw_runlock(&swcr_sessions_lock); + swcr_freesession(dev, cses); return ENOBUFS; } axf->Init((*swd)->sw_ictx); (*swd)->sw_mlen = cri->cri_mlen; (*swd)->sw_axf = axf; break; case CRYPTO_AES_128_NIST_GMAC: axf = &auth_hash_nist_gmac_aes_128; goto auth4common; case CRYPTO_AES_192_NIST_GMAC: axf = &auth_hash_nist_gmac_aes_192; goto auth4common; case CRYPTO_AES_256_NIST_GMAC: axf = &auth_hash_nist_gmac_aes_256; auth4common: len = cri->cri_klen / 8; if (len != 16 && len != 24 && len != 32) { - swcr_freesession_locked(dev, i); - rw_runlock(&swcr_sessions_lock); + swcr_freesession(dev, cses); return EINVAL; } (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if ((*swd)->sw_ictx == NULL) { - swcr_freesession_locked(dev, i); - rw_runlock(&swcr_sessions_lock); + swcr_freesession(dev, cses); return ENOBUFS; } axf->Init((*swd)->sw_ictx); 
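/* Load the AES-GMAC key (validated above to be 16, 24, or 32 bytes) into the just-initialized auth context. */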
axf->Setkey((*swd)->sw_ictx, cri->cri_key, len); (*swd)->sw_axf = axf; break; case CRYPTO_BLAKE2B: axf = &auth_hash_blake2b; goto auth5common; case CRYPTO_BLAKE2S: axf = &auth_hash_blake2s; auth5common: (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if ((*swd)->sw_ictx == NULL) { - swcr_freesession_locked(dev, i); - rw_runlock(&swcr_sessions_lock); + swcr_freesession(dev, cses); return ENOBUFS; } axf->Setkey((*swd)->sw_ictx, cri->cri_key, cri->cri_klen / 8); axf->Init((*swd)->sw_ictx); (*swd)->sw_axf = axf; break; case CRYPTO_DEFLATE_COMP: cxf = &comp_algo_deflate; (*swd)->sw_cxf = cxf; break; default: - swcr_freesession_locked(dev, i); - rw_runlock(&swcr_sessions_lock); + swcr_freesession(dev, cses); return EINVAL; } (*swd)->sw_alg = cri->cri_alg; cri = cri->cri_next; swd = &((*swd)->sw_next); } - rw_runlock(&swcr_sessions_lock); return 0; } -static int -swcr_freesession(device_t dev, u_int64_t tid) +static void +swcr_freesession(device_t dev, crypto_session_t cses) { - int error; - - rw_rlock(&swcr_sessions_lock); - error = swcr_freesession_locked(dev, tid); - rw_runlock(&swcr_sessions_lock); - return error; -} - -/* - * Free a session. - */ -static int -swcr_freesession_locked(device_t dev, u_int64_t tid) -{ - struct swcr_data *swd; + struct swcr_data *ses, *swd, *next; struct enc_xform *txf; struct auth_hash *axf; - u_int32_t sid = CRYPTO_SESID2LID(tid); - if (sid > swcr_sesnum || swcr_sessions == NULL || - swcr_sessions[sid] == NULL) - return EINVAL; + ses = crypto_get_driver_session(cses); - /* Silently accept and return */ - if (sid == 0) - return 0; + for (swd = ses; swd != NULL; swd = next) { + next = swd->sw_next; - while ((swd = swcr_sessions[sid]) != NULL) { - swcr_sessions[sid] = swd->sw_next; - switch (swd->sw_alg) { case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: case CRYPTO_BLF_CBC: case CRYPTO_CAST_CBC: case CRYPTO_SKIPJACK_CBC: case CRYPTO_RIJNDAEL128_CBC: case CRYPTO_AES_XTS: case CRYPTO_AES_ICM: case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_NIST_GMAC: case CRYPTO_CAMELLIA_CBC: case CRYPTO_NULL_CBC: case CRYPTO_CHACHA20: txf = swd->sw_exf; if (swd->sw_kschedule) txf->zerokey(&(swd->sw_kschedule)); break; case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_RIPEMD160_HMAC: case CRYPTO_NULL_HMAC: axf = swd->sw_axf; if (swd->sw_ictx) { bzero(swd->sw_ictx, axf->ctxsize); free(swd->sw_ictx, M_CRYPTO_DATA); } if (swd->sw_octx) { bzero(swd->sw_octx, axf->ctxsize); free(swd->sw_octx, M_CRYPTO_DATA); } break; case CRYPTO_MD5_KPDK: case CRYPTO_SHA1_KPDK: axf = swd->sw_axf; if (swd->sw_ictx) { bzero(swd->sw_ictx, axf->ctxsize); free(swd->sw_ictx, M_CRYPTO_DATA); } if (swd->sw_octx) { bzero(swd->sw_octx, swd->sw_klen); free(swd->sw_octx, M_CRYPTO_DATA); } break; case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: case CRYPTO_MD5: case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA2_384: case CRYPTO_SHA2_512: axf = swd->sw_axf; if (swd->sw_ictx) { explicit_bzero(swd->sw_ictx, axf->ctxsize); free(swd->sw_ictx, M_CRYPTO_DATA); } break; case CRYPTO_DEFLATE_COMP: /* Nothing to do */ break; } - free(swd, M_CRYPTO_DATA); + /* OCF owns and frees the primary session object */ + if (swd != ses) + free(swd, M_CRYPTO_DATA); } - return 0; } /* * Process a software request. 
*/ static int swcr_process(device_t dev, struct cryptop *crp, int hint) { struct cryptodesc *crd; - struct swcr_data *sw; - u_int32_t lid; + struct swcr_data *sw, *ses; /* Sanity check */ if (crp == NULL) return EINVAL; if (crp->crp_desc == NULL || crp->crp_buf == NULL) { crp->crp_etype = EINVAL; goto done; } - lid = CRYPTO_SESID2LID(crp->crp_sid); - rw_rlock(&swcr_sessions_lock); - if (swcr_sessions == NULL || lid >= swcr_sesnum || lid == 0 || - swcr_sessions[lid] == NULL) { - rw_runlock(&swcr_sessions_lock); - crp->crp_etype = ENOENT; - goto done; - } - rw_runlock(&swcr_sessions_lock); + ses = crypto_get_driver_session(crp->crp_session); /* Go through crypto descriptors, processing as we go */ for (crd = crp->crp_desc; crd; crd = crd->crd_next) { /* * Find the crypto context. * * XXX Note that the logic here prevents us from having * XXX the same algorithm multiple times in a session * XXX (or rather, we can but it won't give us the right * XXX results). To do that, we'd need some way of differentiating * XXX between the various instances of an algorithm (so we can * XXX locate the correct crypto context). */ - rw_rlock(&swcr_sessions_lock); - if (swcr_sessions == NULL) { - rw_runlock(&swcr_sessions_lock); - crp->crp_etype = ENOENT; - goto done; - } - for (sw = swcr_sessions[lid]; - sw && sw->sw_alg != crd->crd_alg; + for (sw = ses; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next) ; - rw_runlock(&swcr_sessions_lock); /* No such context ? */ if (sw == NULL) { crp->crp_etype = EINVAL; goto done; } switch (sw->sw_alg) { case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: case CRYPTO_BLF_CBC: case CRYPTO_CAST_CBC: case CRYPTO_SKIPJACK_CBC: case CRYPTO_RIJNDAEL128_CBC: case CRYPTO_AES_XTS: case CRYPTO_AES_ICM: case CRYPTO_CAMELLIA_CBC: case CRYPTO_CHACHA20: if ((crp->crp_etype = swcr_encdec(crd, sw, crp->crp_buf, crp->crp_flags)) != 0) goto done; break; case CRYPTO_NULL_CBC: crp->crp_etype = 0; break; case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_RIPEMD160_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_MD5_KPDK: case CRYPTO_SHA1_KPDK: case CRYPTO_MD5: case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA2_384: case CRYPTO_SHA2_512: case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: if ((crp->crp_etype = swcr_authcompute(crd, sw, crp->crp_buf, crp->crp_flags)) != 0) goto done; break; case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_NIST_GMAC: case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: crp->crp_etype = swcr_authenc(crp); goto done; case CRYPTO_DEFLATE_COMP: if ((crp->crp_etype = swcr_compdec(crd, sw, crp->crp_buf, crp->crp_flags)) != 0) goto done; else crp->crp_olen = (int)sw->sw_size; break; default: /* Unknown/unsupported algorithm */ crp->crp_etype = EINVAL; goto done; } } done: crypto_done(crp); return 0; } static void swcr_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "cryptosoft", -1) == NULL && BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0) panic("cryptosoft: could not attach"); } static int swcr_probe(device_t dev) { device_set_desc(dev, "software crypto"); return (BUS_PROBE_NOWILDCARD); } static int swcr_attach(device_t dev) { - rw_init(&swcr_sessions_lock, "swcr_sessions_lock"); memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN); memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN); - swcr_id = 
crypto_get_driverid(dev, + swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_data), CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); if (swcr_id < 0) { device_printf(dev, "cannot initialize!"); return ENOMEM; } #define REGISTER(alg) \ crypto_register(swcr_id, alg, 0,0) REGISTER(CRYPTO_DES_CBC); REGISTER(CRYPTO_3DES_CBC); REGISTER(CRYPTO_BLF_CBC); REGISTER(CRYPTO_CAST_CBC); REGISTER(CRYPTO_SKIPJACK_CBC); REGISTER(CRYPTO_NULL_CBC); REGISTER(CRYPTO_MD5_HMAC); REGISTER(CRYPTO_SHA1_HMAC); REGISTER(CRYPTO_SHA2_224_HMAC); REGISTER(CRYPTO_SHA2_256_HMAC); REGISTER(CRYPTO_SHA2_384_HMAC); REGISTER(CRYPTO_SHA2_512_HMAC); REGISTER(CRYPTO_RIPEMD160_HMAC); REGISTER(CRYPTO_NULL_HMAC); REGISTER(CRYPTO_MD5_KPDK); REGISTER(CRYPTO_SHA1_KPDK); REGISTER(CRYPTO_MD5); REGISTER(CRYPTO_SHA1); REGISTER(CRYPTO_SHA2_224); REGISTER(CRYPTO_SHA2_256); REGISTER(CRYPTO_SHA2_384); REGISTER(CRYPTO_SHA2_512); REGISTER(CRYPTO_RIJNDAEL128_CBC); REGISTER(CRYPTO_AES_XTS); REGISTER(CRYPTO_AES_ICM); REGISTER(CRYPTO_AES_NIST_GCM_16); REGISTER(CRYPTO_AES_NIST_GMAC); REGISTER(CRYPTO_AES_128_NIST_GMAC); REGISTER(CRYPTO_AES_192_NIST_GMAC); REGISTER(CRYPTO_AES_256_NIST_GMAC); REGISTER(CRYPTO_CAMELLIA_CBC); REGISTER(CRYPTO_DEFLATE_COMP); REGISTER(CRYPTO_BLAKE2B); REGISTER(CRYPTO_BLAKE2S); REGISTER(CRYPTO_CHACHA20); #undef REGISTER return 0; } static int swcr_detach(device_t dev) { crypto_unregister_all(swcr_id); - rw_wlock(&swcr_sessions_lock); - free(swcr_sessions, M_CRYPTO_DATA); - swcr_sessions = NULL; - rw_wunlock(&swcr_sessions_lock); - rw_destroy(&swcr_sessions_lock); return 0; } static device_method_t swcr_methods[] = { DEVMETHOD(device_identify, swcr_identify), DEVMETHOD(device_probe, swcr_probe), DEVMETHOD(device_attach, swcr_attach), DEVMETHOD(device_detach, swcr_detach), DEVMETHOD(cryptodev_newsession, swcr_newsession), DEVMETHOD(cryptodev_freesession,swcr_freesession), DEVMETHOD(cryptodev_process, swcr_process), {0, 0}, }; static driver_t swcr_driver = { "cryptosoft", swcr_methods, 0, /* NB: no softc */ }; static devclass_t swcr_devclass; /* * NB: We explicitly reference the crypto module so we * get the necessary ordering when built as a loadable * module. This is required because we bundle the crypto * module code together with the cryptosoft driver (otherwise * normal module dependencies would handle things). */ extern int crypto_modevent(struct module *, int, void *); /* XXX where to attach */ DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0); MODULE_VERSION(cryptosoft, 1); MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
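For reference, the driver-facing shape of the new opaque-session interface can be summarized with a short skeleton. This is a minimal sketch under stated assumptions, not part of the commit: the names mydrv_*, struct mydrv_session, and the choice of CRYPTO_AES_CBC are hypothetical; only the OCF entry points (crypto_get_driverid with a session size, crypto_get_driver_session, crypto_register, crypto_done) and the cryptodev_if method signatures come from the sources shown above.

#include <opencrypto/cryptodev.h>
#include "cryptodev_if.h"

/* Per-session state; OCF allocates and zeroes one of these per session. */
struct mydrv_session {
	int	alg;			/* algorithm chosen at newsession time */
};

static int32_t mydrv_cid;

static int
mydrv_attach(device_t dev)
{
	/* Tell OCF how much per-session memory to allocate on our behalf. */
	mydrv_cid = crypto_get_driverid(dev, sizeof(struct mydrv_session),
	    CRYPTOCAP_F_HARDWARE);
	if (mydrv_cid < 0)
		return (ENOMEM);
	crypto_register(mydrv_cid, CRYPTO_AES_CBC, 0, 0);
	return (0);
}

static int
mydrv_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
{
	struct mydrv_session *ses;

	/* Session memory is already allocated and zeroed, like a softc. */
	ses = crypto_get_driver_session(cses);
	ses->alg = cri->cri_alg;
	return (0);
}

static void
mydrv_freesession(device_t dev, crypto_session_t cses)
{
	/*
	 * Only extra allocations would be released here; OCF owns, zeroes,
	 * and frees the session object itself.  A driver with nothing to
	 * release can omit this method and rely on the default.
	 */
}

static int
mydrv_process(device_t dev, struct cryptop *crp, int hint)
{
	struct mydrv_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);
	/* ... carry out the descriptors in crp->crp_desc using ses ... */
	crp->crp_etype = 0;
	crypto_done(crp);
	return (0);
}

static device_method_t mydrv_methods[] = {
	DEVMETHOD(device_attach,	mydrv_attach),
	DEVMETHOD(cryptodev_newsession,	mydrv_newsession),
	DEVMETHOD(cryptodev_freesession, mydrv_freesession),
	DEVMETHOD(cryptodev_process,	mydrv_process),
	{0, 0},
};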