Index: head/share/man/man9/crypto_buffer.9 =================================================================== --- head/share/man/man9/crypto_buffer.9 (revision 364798) +++ head/share/man/man9/crypto_buffer.9 (revision 364799) @@ -1,307 +1,327 @@ .\" Copyright (c) 2020, Chelsio Inc .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions are met: .\" .\" 1. Redistributions of source code must retain the above copyright notice, .\" this list of conditions and the following disclaimer. .\" .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" 3. Neither the name of the Chelsio Inc nor the names of its .\" contributors may be used to endorse or promote products derived from .\" this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" .\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE .\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" .\" * Other names and brands may be claimed as the property of others. .\" .\" $FreeBSD$ .\" -.Dd May 25, 2020 +.Dd August 12, 2020 .Dt CRYPTO_BUFFER 9 .Os .Sh NAME .Nm crypto_buffer .Nd symmetric cryptographic request buffers .Sh SYNOPSIS .In opencrypto/cryptodev.h .Ft int .Fo crypto_apply .Fa "struct cryptop *crp" .Fa "int off" .Fa "int len" .Fa "int (*f)(void *, void *, u_int)" .Fa "void *arg" .Fc .Ft int .Fo crypto_apply_buf .Fa "struct crypto_buffer *cb" .Fa "int off" .Fa "int len" .Fa "int (*f)(void *, void *, u_int)" .Fa "void *arg" .Fc .Ft void * .Fo crypto_buffer_contiguous_subsegment .Fa "struct crypto_buffer *cb" .Fa "size_t skip" .Fa "size_t len" .Fc .Ft size_t .Fn crypto_buffer_len "struct crypto_buffer *cb" .Ft void * .Fo crypto_contiguous_subsegment .Fa "struct cryptop *crp" .Fa "size_t skip" .Fa "size_t len" .Fc .Ft void .Fo crypto_cursor_init .Fa "struct crypto_buffer_cursor *cc" .Fa "const struct crypto_buffer *cb" .Fc .Ft void .Fn crypto_cursor_advance "struct crypto_buffer_cursor *cc" "size_t amount" .Ft void .Fo crypto_cursor_copyback .Fa "struct crypto_buffer_cursor *cc" .Fa "int size" .Fa "const void *src" .Fc .Ft void .Fo crypto_cursor_copydata .Fa "struct crypto_buffer_cursor *cc" .Fa "int size" .Fa "void *dst" .Fc .Ft void .Fo crypto_cursor_copydata_noadv .Fa "struct crypto_buffer_cursor *cc" .Fa "int size" .Fa "void *dst" .Fc .Ft void * .Fn crypto_cursor_segbase "struct crypto_buffer_cursor *cc" .Ft size_t .Fn crypto_cursor_seglen "struct crypto_buffer_cursor *cc" .Ft bool .Fn CRYPTO_HAS_OUTPUT_BUFFER "struct cryptop *crp" .Sh DESCRIPTION Symmetric cryptographic requests use data buffers to describe the data to be modified. 
Requests can either specify a single data buffer whose contents are modified in place, or requests may specify separate data buffers for input and output. .Vt struct crypto_buffer provides an abstraction that permits cryptographic requests to operate on different types of buffers. .Vt struct crypto_cursor allows cryptographic drivers to iterate over a data buffer. .Pp .Fn CRYPTO_HAS_OUTPUT_BUFFER returns true if .Fa crp uses separate buffers for input and output and false if .Fa crp uses a single buffer. .Pp .Fn crypto_buffer_len returns the length of the data buffer .Fa cb in bytes. .Pp .Fn crypto_apply_buf applies a caller-supplied function to a region of the data buffer .Fa cb . The function .Fa f is called one or more times. For each invocation, the first argument to .Fa f is the value of .Fa arg passed to .Fn crypto_apply_buf . The second and third arguments to .Fa f are a pointer to, and the length of, a segment of the buffer mapped into the kernel. The function is called enough times to cover the .Fa len bytes of the data buffer starting at offset .Fa off . If any invocation of .Fa f returns a non-zero value, .Fn crypto_apply_buf immediately returns that value without invoking .Fa f on any remaining segments of the region; otherwise, .Fn crypto_apply_buf returns the value from the final call to .Fa f . .Fn crypto_apply invokes the callback .Fa f on a region of the input data buffer for .Fa crp . .Pp .Fn crypto_buffer_contiguous_subsegment attempts to locate a single, virtually-contiguous segment of the data buffer .Fa cb . The segment must be .Fa len bytes long and start at an offset of .Fa skip bytes. If a segment is found, a pointer to the start of the segment is returned. Otherwise, .Dv NULL is returned. .Fn crypto_contiguous_subsegment attempts to locate a single, virtually-contiguous segment in the input data buffer for .Fa crp . .Ss Data Buffers Data buffers are described by an instance of .Vt struct crypto_buffer . The .Fa cb_type member contains the type of the data buffer. The following types are supported: .Bl -tag -width " CRYPTO_BUF_CONTIG" .It Dv CRYPTO_BUF_NONE An invalid buffer. Used to mark the output buffer when a crypto request uses a single data buffer. .It Dv CRYPTO_BUF_CONTIG An array of bytes mapped into the kernel's address space. .It Dv CRYPTO_BUF_UIO A scatter/gather list of kernel buffers as described in .Xr uio 9 . .It Dv CRYPTO_BUF_MBUF A network memory buffer as described in .Xr mbuf 9 . +.It Dv CRYPTO_BUF_VMPAGE +A scatter/gather list of +.Vt vm_page_t +structures describing pages in the kernel's address space. +This buffer type is only available if +.Dv CRYPTO_HAS_VMPAGE +is true. .El .Pp The structure also contains the following type-specific fields: -.Bl -tag -width " cb_buf_len" +.Bl -tag -width " cb_vm_page_offset" .It Fa cb_buf A pointer to the start of a .Dv CRYPTO_BUF_CONTIG data buffer. .It Fa cb_buf_len The length of a .Dv CRYPTO_BUF_CONTIG data buffer. .It Fa cb_mbuf A pointer to a .Vt struct mbuf for .Dv CRYPTO_BUF_MBUF . .It Fa cb_uio A pointer to a .Vt struct uio for .Dv CRYPTO_BUF_UIO . +.It Fa cb_vm_page +A pointer to an array of +.Vt struct vm_page +for +.Dv CRYPTO_BUF_VMPAGE . +.It Fa cb_vm_page_len +The total amount of data included in the +.Fa cb_vm_page +array, in bytes. +.It Fa cb_vm_page_offset +Offset in bytes in the first page of +.Fa cb_vm_page +where valid data begins. .El .Ss Cursors Cursors provide a mechanism for iterating over a data buffer.
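.Pp
For example, a driver could copy a region out of a data buffer into local storage with a cursor, as in the following minimal sketch.
The helper name is hypothetical, the caller is assumed to have verified that the region lies within the buffer, and the individual cursor functions are described in detail below:
.Bd -literal -offset indent
#include <opencrypto/cryptodev.h>

/*
 * Hypothetical helper: copy 'len' bytes starting at byte offset 'off'
 * of the data buffer 'cb' into 'dst'.  The caller must ensure that
 * off + len does not extend past crypto_buffer_len(cb).
 */
static void
example_copy_region(const struct crypto_buffer *cb, size_t off, int len,
    void *dst)
{
	struct crypto_buffer_cursor cc;

	/* Position the cursor at the start of the region... */
	crypto_cursor_init(&cc, cb);
	crypto_cursor_advance(&cc, off);

	/* ...and copy the region out, advancing the cursor past it. */
	crypto_cursor_copydata(&cc, len, dst);
}
.Ed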
They are primarily intended for use in software drivers which access data buffers via virtual addresses. .Pp .Fn crypto_cursor_init initializes the cursor .Fa cc to reference the start of the data buffer .Fa cb . .Pp .Fn crypto_cursor_advance advances the cursor .Fa amount bytes forward in the data buffer. .Pp .Fn crypto_cursor_copyback copies .Fa size bytes from the local buffer pointed to by .Fa src into the data buffer associated with .Fa cc . The bytes are written to the current position of .Fa cc , and the cursor is then advanced by .Fa size bytes. .Pp .Fn crypto_cursor_copydata copies .Fa size bytes out of the data buffer associated with .Fa cc into a local buffer pointed to by .Fa dst . The bytes are read from the current position of .Fa cc , and the cursor is then advanced by .Fa size bytes. .Pp .Fn crypto_cursor_copydata_noadv is similar to .Fn crypto_cursor_copydata except that it does not change the current position of .Fa cc . .Pp .Fn crypto_cursor_segbase and .Fn crypto_cursor_seglen return the start and length, respectively, of the virtually-contiguous segment at the current position of .Fa cc . .Sh RETURN VALUES .Fn crypto_apply and .Fn crypto_apply_buf return the return value from the caller-supplied callback function. .Pp .Fn crypto_buffer_contiguous_subsegment , .Fn crypto_contiguous_subsegment , and .Fn crypto_cursor_segbase , return a pointer to a contiguous segment or .Dv NULL . .Pp .Fn crypto_buffer_len returns the length of a buffer in bytes. .Pp .Fn crypto_cursor_seglen returns the length in bytes of a contiguous segment. .Pp .Fn CRYPTO_HAS_OUTPUT_BUFFER returns true if the request uses a separate output buffer. .Sh SEE ALSO .Xr ipsec 4 , .Xr bus_dma 9 , .Xr crypto 7 , .Xr crypto 9 , .Xr crypto_request 9 , .Xr crypto_driver 9 , .Xr crypto_session 9 , .Xr mbuf 9 .Xr uio 9 Index: head/share/man/man9/crypto_request.9 =================================================================== --- head/share/man/man9/crypto_request.9 (revision 364798) +++ head/share/man/man9/crypto_request.9 (revision 364799) @@ -1,499 +1,511 @@ .\" Copyright (c) 2020, Chelsio Inc .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions are met: .\" .\" 1. Redistributions of source code must retain the above copyright notice, .\" this list of conditions and the following disclaimer. .\" .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" 3. Neither the name of the Chelsio Inc nor the names of its .\" contributors may be used to endorse or promote products derived from .\" this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" .\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE .\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" .\" * Other names and brands may be claimed as the property of others. .\" .\" $FreeBSD$ .\" -.Dd July 16, 2020 +.Dd August 12, 2020 .Dt CRYPTO_REQUEST 9 .Os .Sh NAME .Nm crypto_request .Nd symmetric cryptographic operations .Sh SYNOPSIS .In opencrypto/cryptodev.h .Ft int .Fn crypto_dispatch "struct cryptop *crp" .Ft void .Fn crypto_destroyreq "struct cryptop *crp" .Ft void .Fn crypto_freereq "struct cryptop *crp" .Ft "struct cryptop *" .Fn crypto_getreq "crypto_session_t cses" "int how" .Ft void .Fn crypto_initreq "struct cryptop *crp" "crypto_session_t cses" .Ft void .Fn crypto_use_buf "struct cryptop *crp" "void *buf" "int len" .Ft void .Fn crypto_use_mbuf "struct cryptop *crp" "struct mbuf *m" .Ft void .Fn crypto_use_uio "struct cryptop *crp" "struct uio *uio" .Ft void +.Fn crypto_use_vmpage "struct cryptop *crp" "vm_page_t *pages" "int len" "int offset" +.Ft void .Fn crypto_use_output_buf "struct cryptop *crp" "void *buf" "int len" .Ft void .Fn crypto_use_output_mbuf "struct cryptop *crp" "struct mbuf *m" .Ft void .Fn crypto_use_output_uio "struct cryptop *crp" "struct uio *uio" +.Ft void +.Fn crypto_use_output_vmpage "struct cryptop *crp" "vm_page_t *pages" "int len" "int offset" .Sh DESCRIPTION Each symmetric cryptographic operation in the kernel is described by an instance of .Vt struct cryptop and is associated with an active session. .Pp Requests can either be allocated dynamically or use caller-supplied storage. Dynamically allocated requests should be allocated by .Fn crypto_getreq and freed by .Fn crypto_freereq once the request has completed. Requests using caller-supplied storage should be initialized by .Fn crypto_initreq at the start of each operation and destroyed by .Fn crypto_destroyreq once the request has completed. .Pp For both .Fn crypto_getreq and .Fn crypto_initreq , .Fa cses is a reference to an active session. For .Fn crypto_getreq , .Fa how is passed to .Xr malloc 9 and should be set to either .Dv M_NOWAIT or .Dv M_WAITOK . .Pp Once a request has been initialized, the caller should set fields in the structure to describe request-specific parameters. Unused fields should be left as-is. .Pp .Fn crypto_dispatch passes a crypto request to the driver attached to the request's session. If there are errors in the request's fields, this function may return an error to the caller. If errors are encountered while servicing the request, they will instead be reported to the request's callback function .Pq Fa crp_callback via .Fa crp_etype . .Pp Note that a request's callback function may be invoked before .Fn crypto_dispatch returns. .Pp Once a request has signaled completion by invoking its callback function, it should be freed via .Fn crypto_destroyreq or .Fn crypto_freereq . .Pp Cryptographic operations include several fields to describe the request. .Ss Request Buffers Requests can either specify a single data buffer that is modified in place .Po .Fa crp_buf .Pc or separate input .Po .Fa crp_buf .Pc and output .Po .Fa crp_obuf .Pc buffers.
Note that separate input and output buffers are not supported for compression mode requests. .Pp All requests must have a valid .Fa crp_buf initialized by one of the following functions: -.Bl -tag -width "Fn crypto_use_mbuf" +.Bl -tag -width "Fn crypto_use_vmpage" .It Fn crypto_use_buf Uses an array of .Fa len bytes pointed to by .Fa buf as the data buffer. .It Fn crypto_use_mbuf Uses the network memory buffer .Fa m as the data buffer. .It Fn crypto_use_uio Uses the scatter/gather list .Fa uio as the data buffer. +.It Fn crypto_use_vmpage +Uses the array of +.Vt vm_page_t +structures as the data buffer. .El .Pp One of the following functions should be used to initialize .Fa crp_obuf for requests that use separate input and output buffers: -.Bl -tag -width "Fn crypto_use_output_mbuf" +.Bl -tag -width "Fn crypto_use_output_vmpage" .It Fn crypto_use_output_buf Uses an array of .Fa len bytes pointed to by .Fa buf as the output buffer. .It Fn crypto_use_output_mbuf Uses the network memory buffer .Fa m as the output buffer. .It Fn crypto_use_output_uio Uses the scatter/gather list .Fa uio as the output buffer. +.It Fn crypto_use_output_vmpage +Uses the array of +.Vt vm_page_t +structures as the output buffer. .El .Ss Request Regions Each request describes one or more regions in the data buffers. Each region is described by an offset relative to the start of a data buffer and a length. The length of some regions is the same for all requests belonging to a session. Those lengths are set in the session parameters of the associated session. All requests must define a payload region. Other regions are only required for specific session modes. .Pp For requests with separate input and output data buffers, the AAD, IV, and payload regions are always defined as regions in the input buffer, and a separate payload output region is defined to hold the output of encryption or decryption in the output buffer. The digest region describes a region in the input data buffer for requests that verify an existing digest. For requests that compute a digest, the digest region describes a region in the output data buffer. Note that the only data written to the output buffer is the encryption or decryption result and any computed digest. AAD and IV regions are not copied from the input buffer into the output buffer but are only used as inputs. .Pp The following regions are defined: .Bl -column "Payload Output" "Input/Output" .It Sy Region Ta Sy Buffer Ta Sy Description .It AAD Ta Input Ta Embedded Additional Authenticated Data .It IV Ta Input Ta Embedded IV or nonce .It Payload Ta Input Ta Data to encrypt, decrypt, compress, or decompress .It Payload Output Ta Output Ta Encrypted or decrypted data .It Digest Ta Input/Output Ta Authentication digest, hash, or tag .El .Bl -column "Payload Output" ".Fa crp_payload_output_start" .It Sy Region Ta Sy Start Ta Sy Length .It AAD Ta Fa crp_aad_start Ta Fa crp_aad_length .It IV Ta Fa crp_iv_start Ta Fa csp_ivlen .It Payload Ta Fa crp_payload_start Ta Fa crp_payload_length .It Payload Output Ta Fa crp_payload_output_start Ta Fa crp_payload_length .It Digest Ta Fa crp_digest_start Ta Fa csp_auth_mlen .El .Pp Requests are permitted to operate on only a subset of the data buffer. For example, requests from IPsec operate on network packets that include headers not used as either additional authentication data (AAD) or payload data. .Ss Request Operations All requests must specify the type of operation to perform in .Fa crp_op . Available operations depend on the session's mode. 
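.Pp
As an illustration only, the following sketch submits an in-place encryption request over a flat kernel buffer using an existing cipher session.
The helper and callback names are hypothetical, and IV setup and error handling (both described later in this page) are omitted:
.Bd -literal -offset indent
#include <sys/param.h>
#include <sys/malloc.h>
#include <opencrypto/cryptodev.h>

/* Hypothetical completion callback. */
static void
example_done(struct cryptop *crp)
{
	/* Inspect crp->crp_etype for errors here. */
	crypto_freereq(crp);
}

/* Hypothetical helper: encrypt 'len' bytes of 'buf' in place. */
static int
example_encrypt(crypto_session_t cses, void *buf, int len)
{
	struct cryptop *crp;

	crp = crypto_getreq(cses, M_WAITOK);
	crypto_use_buf(crp, buf, len);

	/* The payload region covers the entire data buffer. */
	crp->crp_payload_start = 0;
	crp->crp_payload_length = len;

	/* Request one of the operations listed below. */
	crp->crp_op = CRYPTO_OP_ENCRYPT;

	crp->crp_callback = example_done;
	return (crypto_dispatch(crp));
}
.Ed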
.Pp Compression requests support the following operations: .Bl -tag -width CRYPTO_OP_DECOMPRESS .It Dv CRYPTO_OP_COMPRESS Compress the data in the payload region of the data buffer. .It Dv CRYPTO_OP_DECOMPRESS Decompress the data in the payload region of the data buffer. .El .Pp Cipher requests support the following operations: .Bl -tag -width CRYPTO_OP_DECRYPT .It Dv CRYPTO_OP_ENCRYPT Encrypt the data in the payload region of the data buffer. .It Dv CRYPTO_OP_DECRYPT Decrypt the data in the payload region of the data buffer. .El .Pp Digest requests support the following operations: .Bl -tag -width CRYPTO_OP_COMPUTE_DIGEST .It Dv CRYPTO_OP_COMPUTE_DIGEST Calculate a digest over the payload region of the data buffer and store the result in the digest region. .It Dv CRYPTO_OP_VERIFY_DIGEST Calculate a digest over the payload region of the data buffer. Compare the calculated digest to the existing digest from the digest region. If the digests match, complete the request successfully. If the digests do not match, fail the request with .Er EBADMSG . .El .Pp AEAD and Encrypt-then-Authenticate requests support the following operations: .Bl -tag -width CRYPTO_OP .It Dv CRYPTO_OP_ENCRYPT | Dv CRYPTO_OP_COMPUTE_DIGEST Encrypt the data in the payload region of the data buffer. Calculate a digest over the AAD and payload regions and store the result in the data buffer. .It Dv CRYPTO_OP_DECRYPT | Dv CRYPTO_OP_VERIFY_DIGEST Calculate a digest over the AAD and payload regions of the data buffer. Compare the calculated digest to the existing digest from the digest region. If the digests match, decrypt the payload region. If the digests do not match, fail the request with .Er EBADMSG . .El .Ss Request AAD AEAD and Encrypt-then-Authenticate requests may optionally include Additional Authenticated Data. AAD may either be supplied in the AAD region of the input buffer or as a single buffer pointed to by .Fa crp_aad . In either case, .Fa crp_aad_length always indicates the amount of AAD in bytes. .Ss Request IV and/or Nonce Some cryptographic operations require an IV or nonce as an input. An IV may be stored either in the IV region of the data buffer or in .Fa crp_iv . By default, the IV is assumed to be stored in the IV region. If the IV is stored in .Fa crp_iv , .Dv CRYPTO_F_IV_SEPARATE should be set in .Fa crp_flags and .Fa crp_iv_start should be left as zero. .Pp Requests that store part, but not all, of the IV in the data buffer should store the partial IV in the data buffer and pass the full IV separately in .Fa crp_iv . .Ss Request and Callback Scheduling The crypto framework provides multiple methods of scheduling the dispatch of requests to drivers along with the processing of driver callbacks. Requests use flags in .Fa crp_flags to select the desired scheduling methods. .Pp .Fn crypto_dispatch can pass the request to the session's driver via three different methods: .Bl -enum .It The request is queued to a taskqueue backed by a pool of worker threads. By default the pool is sized to provide one thread for each CPU. Worker threads dequeue requests and pass them to the driver asynchronously. .It The request is passed to the driver synchronously in the context of the thread invoking .Fn crypto_dispatch . .It The request is queued to a queue of pending requests. A single worker thread dequeues requests and passes them to the driver asynchronously. .El .Pp To select the first method (taskqueue backed by multiple threads), requests should set .Dv CRYPTO_F_ASYNC . 
To always use the third method (queue to single worker thread), requests should set .Dv CRYPTO_F_BATCH . If both flags are set, .Dv CRYPTO_F_ASYNC takes precedence. If neither flag is set, .Fn crypto_dispatch will first attempt the second method (invoke driver synchronously). If the driver is blocked, the request will be queued using the third method. One caveat is that the first method is only used for requests using software drivers which use host CPUs to process requests. Requests whose session is associated with a hardware driver will ignore .Dv CRYPTO_F_ASYNC and only use .Dv CRYPTO_F_BATCH to determine how requests should be scheduled. .Pp In addition to bypassing synchronous dispatch in .Fn crypto_dispatch , .Dv CRYPTO_F_BATCH requests additional changes aimed at optimizing batches of requests to the same driver. When the worker thread processes a request with .Dv CRYPTO_F_BATCH , it will search the pending request queue for any other requests for the same driver, including requests from different sessions. If any other requests are present, .Dv CRYPTO_HINT_MORE is passed to the driver's process method. Drivers may use this to batch completion interrupts. .Pp Callback function scheduling is simpler than request scheduling. Callbacks can either be invoked synchronously from .Fn crypto_done , or they can be queued to a pool of worker threads. This pool of worker threads is also sized to provide one worker thread for each CPU by default. Note that a callback function invoked synchronously from .Fn crypto_done must follow the same restrictions placed on threaded interrupt handlers. .Pp By default, callbacks are invoked asynchronously by a worker thread. If .Dv CRYPTO_F_CBIMM is set, the callback is always invoked synchronously from .Fn crypto_done . If .Dv CRYPTO_F_CBIFSYNC is set, the callback is invoked synchronously if the request was processed by a software driver or asynchronously if the request was processed by a hardware driver. .Pp If a request was scheduled to the taskqueue via .Dv CRYPTO_F_ASYNC , callbacks are always invoked asynchronously ignoring .Dv CRYPTO_F_CBIMM and .Dv CRYPTO_F_CBIFSYNC . In this case, .Dv CRYPTO_F_ASYNC_KEEPORDER may be set to ensure that callbacks for requests on a given session are invoked in the same order that requests were queued to the session via .Fn crypto_dispatch . This flag is used by IPsec to ensure that decrypted network packets are passed up the network stack in roughly the same order they were received. .Pp .Ss Other Request Fields In addition to the fields and flags enumerated above, .Vt struct cryptop includes the following: .Bl -tag -width crp_payload_length .It Fa crp_session A reference to the active session. This is set when the request is created by .Fn crypto_getreq and should not be modified. Drivers can use this to fetch driver-specific session state or session parameters. .It Fa crp_etype Error status. Either zero on success, or an error if a request fails. Set by drivers prior to completing a request via .Fn crypto_done . .It Fa crp_flags A bitmask of flags. The following flags are available in addition to flags discussed previously: .Bl -tag -width CRYPTO_F_DONE .It Dv CRYPTO_F_DONE Set by .Fa crypto_done before calling .Fa crp_callback . This flag is not very useful and will likely be removed in the future. It can only be safely checked from the callback routine at which point it is always set. .El .It Fa crp_cipher_key Pointer to a request-specific encryption key. 
If this value is not set, the request uses the session encryption key. .It Fa crp_auth_key Pointer to a request-specific authentication key. If this value is not set, the request uses the session authentication key. .It Fa crp_opaque An opaque pointer. This pointer permits users of the cryptographic framework to store information about a request to be used in the callback. .It Fa crp_callback Callback function. This must point to a callback function of type .Vt void (*)(struct cryptop *) . The callback function should inspect .Fa crp_etype to determine the status of the completed operation. It should also arrange for the request to be freed via .Fn crypto_freereq . .It Fa crp_olen Used with compression and decompression requests to describe the updated length of the payload region in the data buffer. .Pp If a compression request increases the size of the payload, then the data buffer is unmodified, the request completes successfully, and .Fa crp_olen is set to the size the compressed data would have used. Callers can compare this to the payload region length to determine if the compressed data was discarded. .El .Sh RETURN VALUES .Fn crypto_dispatch returns an error if the request contained invalid fields, or zero if the request was valid. .Fn crypto_getreq returns a pointer to a new request structure on success, or .Dv NULL on failure. .Dv NULL can only be returned if .Dv M_NOWAIT was passed in .Fa how . .Sh SEE ALSO .Xr ipsec 4 , .Xr crypto 7 , .Xr crypto 9 , .Xr crypto_session 9 , .Xr mbuf 9 .Xr uio 9 .Sh BUGS Not all drivers properly handle mixing session and per-request keys within a single session. Consumers should either use a single key for a session specified in the session parameters or always use per-request keys. Index: head/sys/crypto/ccp/ccp.c =================================================================== --- head/sys/crypto/ccp/ccp.c (revision 364798) +++ head/sys/crypto/ccp/ccp.c (revision 364799) @@ -1,790 +1,794 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2017 Chelsio Communications, Inc. * Copyright (c) 2017 Conrad Meyer * All rights reserved. * Largely borrowed from ccr(4), Written by: John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #include #include #include #include #include "cryptodev_if.h" #include "ccp.h" #include "ccp_hardware.h" MALLOC_DEFINE(M_CCP, "ccp", "AMD CCP crypto"); /* * Need a global softc available for garbage random_source API, which lacks any * context pointer. It's also handy for debugging. */ struct ccp_softc *g_ccp_softc; bool g_debug_print = false; SYSCTL_BOOL(_hw_ccp, OID_AUTO, debug, CTLFLAG_RWTUN, &g_debug_print, 0, "Set to enable debugging log messages"); static struct pciid { uint32_t devid; const char *desc; } ccp_ids[] = { { 0x14561022, "AMD CCP-5a" }, { 0x14681022, "AMD CCP-5b" }, { 0x15df1022, "AMD CCP-5a" }, }; static struct random_source random_ccp = { .rs_ident = "AMD CCP TRNG", .rs_source = RANDOM_PURE_CCP, .rs_read = random_ccp_read, }; /* * ccp_populate_sglist() generates a scatter/gather list that covers the entire * crypto operation buffer. */ static int ccp_populate_sglist(struct sglist *sg, struct crypto_buffer *cb) { int error; sglist_reset(sg); switch (cb->cb_type) { case CRYPTO_BUF_MBUF: error = sglist_append_mbuf(sg, cb->cb_mbuf); break; case CRYPTO_BUF_UIO: error = sglist_append_uio(sg, cb->cb_uio); break; case CRYPTO_BUF_CONTIG: error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len); break; + case CRYPTO_BUF_VMPAGE: + error = sglist_append_vmpages(sg, cb->cb_vm_page, + cb->cb_vm_page_len, cb->cb_vm_page_offset); + break; default: error = EINVAL; } return (error); } static int ccp_probe(device_t dev) { struct pciid *ip; uint32_t id; id = pci_get_devid(dev); for (ip = ccp_ids; ip < &ccp_ids[nitems(ccp_ids)]; ip++) { if (id == ip->devid) { device_set_desc(dev, ip->desc); return (0); } } return (ENXIO); } static void ccp_initialize_queues(struct ccp_softc *sc) { struct ccp_queue *qp; size_t i; for (i = 0; i < nitems(sc->queues); i++) { qp = &sc->queues[i]; qp->cq_softc = sc; qp->cq_qindex = i; mtx_init(&qp->cq_lock, "ccp queue", NULL, MTX_DEF); /* XXX - arbitrarily chosen sizes */ qp->cq_sg_crp = sglist_alloc(32, M_WAITOK); /* Two more SGEs than sg_crp to accommodate ipad. 
*/ qp->cq_sg_ulptx = sglist_alloc(34, M_WAITOK); qp->cq_sg_dst = sglist_alloc(2, M_WAITOK); } } static void ccp_free_queues(struct ccp_softc *sc) { struct ccp_queue *qp; size_t i; for (i = 0; i < nitems(sc->queues); i++) { qp = &sc->queues[i]; mtx_destroy(&qp->cq_lock); sglist_free(qp->cq_sg_crp); sglist_free(qp->cq_sg_ulptx); sglist_free(qp->cq_sg_dst); } } static int ccp_attach(device_t dev) { struct ccp_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; sc->cid = crypto_get_driverid(dev, sizeof(struct ccp_session), CRYPTOCAP_F_HARDWARE); if (sc->cid < 0) { device_printf(dev, "could not get crypto driver id\n"); return (ENXIO); } error = ccp_hw_attach(dev); if (error != 0) return (error); mtx_init(&sc->lock, "ccp", NULL, MTX_DEF); ccp_initialize_queues(sc); if (g_ccp_softc == NULL) { g_ccp_softc = sc; if ((sc->hw_features & VERSION_CAP_TRNG) != 0) random_source_register(&random_ccp); } return (0); } static int ccp_detach(device_t dev) { struct ccp_softc *sc; sc = device_get_softc(dev); mtx_lock(&sc->lock); sc->detaching = true; mtx_unlock(&sc->lock); crypto_unregister_all(sc->cid); if (g_ccp_softc == sc && (sc->hw_features & VERSION_CAP_TRNG) != 0) random_source_deregister(&random_ccp); ccp_hw_detach(dev); ccp_free_queues(sc); if (g_ccp_softc == sc) g_ccp_softc = NULL; mtx_destroy(&sc->lock); return (0); } static void ccp_init_hmac_digest(struct ccp_session *s, const char *key, int klen) { union authctx auth_ctx; struct auth_hash *axf; u_int i; /* * If the key is larger than the block size, use the digest of * the key as the key instead. */ axf = s->hmac.auth_hash; if (klen > axf->blocksize) { axf->Init(&auth_ctx); axf->Update(&auth_ctx, key, klen); axf->Final(s->hmac.ipad, &auth_ctx); explicit_bzero(&auth_ctx, sizeof(auth_ctx)); klen = axf->hashsize; } else memcpy(s->hmac.ipad, key, klen); memset(s->hmac.ipad + klen, 0, axf->blocksize - klen); memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize); for (i = 0; i < axf->blocksize; i++) { s->hmac.ipad[i] ^= HMAC_IPAD_VAL; s->hmac.opad[i] ^= HMAC_OPAD_VAL; } } static bool ccp_aes_check_keylen(int alg, int klen) { switch (klen * 8) { case 128: case 192: if (alg == CRYPTO_AES_XTS) return (false); break; case 256: break; case 512: if (alg != CRYPTO_AES_XTS) return (false); break; default: return (false); } return (true); } static void ccp_aes_setkey(struct ccp_session *s, int alg, const void *key, int klen) { unsigned kbits; if (alg == CRYPTO_AES_XTS) kbits = (klen / 2) * 8; else kbits = klen * 8; switch (kbits) { case 128: s->blkcipher.cipher_type = CCP_AES_TYPE_128; break; case 192: s->blkcipher.cipher_type = CCP_AES_TYPE_192; break; case 256: s->blkcipher.cipher_type = CCP_AES_TYPE_256; break; default: panic("should not get here"); } s->blkcipher.key_len = klen; memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len); } static bool ccp_auth_supported(struct ccp_softc *sc, const struct crypto_session_params *csp) { if ((sc->hw_features & VERSION_CAP_SHA) == 0) return (false); switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: if (csp->csp_auth_key == NULL) return (false); break; default: return (false); } return (true); } static bool ccp_cipher_supported(struct ccp_softc *sc, const struct crypto_session_params *csp) { if ((sc->hw_features & VERSION_CAP_AES) == 0) return (false); switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (csp->csp_ivlen != AES_BLOCK_LEN) return (false); break; case CRYPTO_AES_ICM: if (csp->csp_ivlen != AES_BLOCK_LEN) 
return (false); break; case CRYPTO_AES_XTS: if (csp->csp_ivlen != AES_XTS_IV_LEN) return (false); break; default: return (false); } return (ccp_aes_check_keylen(csp->csp_cipher_alg, csp->csp_cipher_klen)); } static int ccp_probesession(device_t dev, const struct crypto_session_params *csp) { struct ccp_softc *sc; if (csp->csp_flags != 0) return (EINVAL); sc = device_get_softc(dev); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!ccp_auth_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!ccp_cipher_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: if (csp->csp_ivlen != AES_GCM_IV_LEN) return (EINVAL); if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > AES_GMAC_HASH_LEN) return (EINVAL); if ((sc->hw_features & VERSION_CAP_AES) == 0) return (EINVAL); break; default: return (EINVAL); } break; case CSP_MODE_ETA: if (!ccp_auth_supported(sc, csp) || !ccp_cipher_supported(sc, csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_HARDWARE); } static int ccp_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct ccp_softc *sc; struct ccp_session *s; struct auth_hash *auth_hash; enum ccp_aes_mode cipher_mode; unsigned auth_mode; unsigned q; /* XXX reconcile auth_mode with use by ccp_sha */ switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: auth_hash = &auth_hash_hmac_sha1; auth_mode = SHA1; break; case CRYPTO_SHA2_256_HMAC: auth_hash = &auth_hash_hmac_sha2_256; auth_mode = SHA2_256; break; case CRYPTO_SHA2_384_HMAC: auth_hash = &auth_hash_hmac_sha2_384; auth_mode = SHA2_384; break; case CRYPTO_SHA2_512_HMAC: auth_hash = &auth_hash_hmac_sha2_512; auth_mode = SHA2_512; break; default: auth_hash = NULL; auth_mode = 0; break; } switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: cipher_mode = CCP_AES_MODE_CBC; break; case CRYPTO_AES_ICM: cipher_mode = CCP_AES_MODE_CTR; break; case CRYPTO_AES_NIST_GCM_16: cipher_mode = CCP_AES_MODE_GCTR; break; case CRYPTO_AES_XTS: cipher_mode = CCP_AES_MODE_XTS; break; default: cipher_mode = CCP_AES_MODE_ECB; break; } sc = device_get_softc(dev); mtx_lock(&sc->lock); if (sc->detaching) { mtx_unlock(&sc->lock); return (ENXIO); } s = crypto_get_driver_session(cses); /* Just grab the first usable queue for now. 
*/ for (q = 0; q < nitems(sc->queues); q++) if ((sc->valid_queues & (1 << q)) != 0) break; if (q == nitems(sc->queues)) { mtx_unlock(&sc->lock); return (ENXIO); } s->queue = q; switch (csp->csp_mode) { case CSP_MODE_AEAD: s->mode = GCM; break; case CSP_MODE_ETA: s->mode = AUTHENC; break; case CSP_MODE_DIGEST: s->mode = HMAC; break; case CSP_MODE_CIPHER: s->mode = BLKCIPHER; break; } if (s->mode == GCM) { if (csp->csp_auth_mlen == 0) s->gmac.hash_len = AES_GMAC_HASH_LEN; else s->gmac.hash_len = csp->csp_auth_mlen; } else if (auth_hash != NULL) { s->hmac.auth_hash = auth_hash; s->hmac.auth_mode = auth_mode; if (csp->csp_auth_mlen == 0) s->hmac.hash_len = auth_hash->hashsize; else s->hmac.hash_len = csp->csp_auth_mlen; ccp_init_hmac_digest(s, csp->csp_auth_key, csp->csp_auth_klen); } if (cipher_mode != CCP_AES_MODE_ECB) { s->blkcipher.cipher_mode = cipher_mode; if (csp->csp_cipher_key != NULL) ccp_aes_setkey(s, csp->csp_cipher_alg, csp->csp_cipher_key, csp->csp_cipher_klen); } s->active = true; mtx_unlock(&sc->lock); return (0); } static void ccp_freesession(device_t dev, crypto_session_t cses) { struct ccp_session *s; s = crypto_get_driver_session(cses); if (s->pending != 0) device_printf(dev, "session %p freed with %d pending requests\n", s, s->pending); s->active = false; } static int ccp_process(device_t dev, struct cryptop *crp, int hint) { const struct crypto_session_params *csp; struct ccp_softc *sc; struct ccp_queue *qp; struct ccp_session *s; int error; bool qpheld; qpheld = false; qp = NULL; csp = crypto_get_params(crp->crp_session); s = crypto_get_driver_session(crp->crp_session); sc = device_get_softc(dev); mtx_lock(&sc->lock); qp = &sc->queues[s->queue]; mtx_unlock(&sc->lock); error = ccp_queue_acquire_reserve(qp, 1 /* placeholder */, M_NOWAIT); if (error != 0) goto out; qpheld = true; error = ccp_populate_sglist(qp->cq_sg_crp, &crp->crp_buf); if (error != 0) goto out; if (crp->crp_auth_key != NULL) { KASSERT(s->hmac.auth_hash != NULL, ("auth key without HMAC")); ccp_init_hmac_digest(s, crp->crp_auth_key, csp->csp_auth_klen); } if (crp->crp_cipher_key != NULL) ccp_aes_setkey(s, csp->csp_cipher_alg, crp->crp_cipher_key, csp->csp_cipher_klen); switch (s->mode) { case HMAC: if (s->pending != 0) { error = EAGAIN; break; } error = ccp_hmac(qp, s, crp); break; case BLKCIPHER: if (s->pending != 0) { error = EAGAIN; break; } error = ccp_blkcipher(qp, s, crp); break; case AUTHENC: if (s->pending != 0) { error = EAGAIN; break; } error = ccp_authenc(qp, s, crp); break; case GCM: if (s->pending != 0) { error = EAGAIN; break; } error = ccp_gcm(qp, s, crp); break; } if (error == 0) s->pending++; out: if (qpheld) { if (error != 0) { /* * Squash EAGAIN so callers don't uselessly and * expensively retry if the ring was full. 
*/ if (error == EAGAIN) error = ENOMEM; ccp_queue_abort(qp); } else ccp_queue_release(qp); } if (error != 0) { DPRINTF(dev, "%s: early error:%d\n", __func__, error); crp->crp_etype = error; crypto_done(crp); } return (0); } static device_method_t ccp_methods[] = { DEVMETHOD(device_probe, ccp_probe), DEVMETHOD(device_attach, ccp_attach), DEVMETHOD(device_detach, ccp_detach), DEVMETHOD(cryptodev_probesession, ccp_probesession), DEVMETHOD(cryptodev_newsession, ccp_newsession), DEVMETHOD(cryptodev_freesession, ccp_freesession), DEVMETHOD(cryptodev_process, ccp_process), DEVMETHOD_END }; static driver_t ccp_driver = { "ccp", ccp_methods, sizeof(struct ccp_softc) }; static devclass_t ccp_devclass; DRIVER_MODULE(ccp, pci, ccp_driver, ccp_devclass, NULL, NULL); MODULE_VERSION(ccp, 1); MODULE_DEPEND(ccp, crypto, 1, 1, 1); MODULE_DEPEND(ccp, random_device, 1, 1, 1); #if 0 /* There are enough known issues that we shouldn't load automatically */ MODULE_PNP_INFO("W32:vendor/device", pci, ccp, ccp_ids, nitems(ccp_ids)); #endif static int ccp_queue_reserve_space(struct ccp_queue *qp, unsigned n, int mflags) { struct ccp_softc *sc; mtx_assert(&qp->cq_lock, MA_OWNED); sc = qp->cq_softc; if (n < 1 || n >= (1 << sc->ring_size_order)) return (EINVAL); while (true) { if (ccp_queue_get_ring_space(qp) >= n) return (0); if ((mflags & M_WAITOK) == 0) return (EAGAIN); qp->cq_waiting = true; msleep(&qp->cq_tail, &qp->cq_lock, 0, "ccpqfull", 0); } } int ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags) { int error; mtx_lock(&qp->cq_lock); qp->cq_acq_tail = qp->cq_tail; error = ccp_queue_reserve_space(qp, n, mflags); if (error != 0) mtx_unlock(&qp->cq_lock); return (error); } void ccp_queue_release(struct ccp_queue *qp) { mtx_assert(&qp->cq_lock, MA_OWNED); if (qp->cq_tail != qp->cq_acq_tail) { wmb(); ccp_queue_write_tail(qp); } mtx_unlock(&qp->cq_lock); } void ccp_queue_abort(struct ccp_queue *qp) { unsigned i; mtx_assert(&qp->cq_lock, MA_OWNED); /* Wipe out any descriptors associated with this aborted txn. 
*/ for (i = qp->cq_acq_tail; i != qp->cq_tail; i = (i + 1) % (1 << qp->cq_softc->ring_size_order)) { memset(&qp->desc_ring[i], 0, sizeof(qp->desc_ring[i])); } qp->cq_tail = qp->cq_acq_tail; mtx_unlock(&qp->cq_lock); } #ifdef DDB #define _db_show_lock(lo) LOCK_CLASS(lo)->lc_ddb_show(lo) #define db_show_lock(lk) _db_show_lock(&(lk)->lock_object) static void db_show_ccp_sc(struct ccp_softc *sc) { db_printf("ccp softc at %p\n", sc); db_printf(" cid: %d\n", (int)sc->cid); db_printf(" lock: "); db_show_lock(&sc->lock); db_printf(" detaching: %d\n", (int)sc->detaching); db_printf(" ring_size_order: %u\n", sc->ring_size_order); db_printf(" hw_version: %d\n", (int)sc->hw_version); db_printf(" hw_features: %b\n", (int)sc->hw_features, "\20\24ELFC\23TRNG\22Zip_Compress\16Zip_Decompress\13ECC\12RSA" "\11SHA\0103DES\07AES"); db_printf(" hw status:\n"); db_ccp_show_hw(sc); } static void db_show_ccp_qp(struct ccp_queue *qp) { db_printf(" lock: "); db_show_lock(&qp->cq_lock); db_printf(" cq_qindex: %u\n", qp->cq_qindex); db_printf(" cq_softc: %p\n", qp->cq_softc); db_printf(" head: %u\n", qp->cq_head); db_printf(" tail: %u\n", qp->cq_tail); db_printf(" acq_tail: %u\n", qp->cq_acq_tail); db_printf(" desc_ring: %p\n", qp->desc_ring); db_printf(" completions_ring: %p\n", qp->completions_ring); db_printf(" descriptors (phys): 0x%jx\n", (uintmax_t)qp->desc_ring_bus_addr); db_printf(" hw status:\n"); db_ccp_show_queue_hw(qp); } DB_SHOW_COMMAND(ccp, db_show_ccp) { struct ccp_softc *sc; unsigned unit, qindex; if (!have_addr) goto usage; unit = (unsigned)addr; sc = devclass_get_softc(ccp_devclass, unit); if (sc == NULL) { db_printf("No such device ccp%u\n", unit); goto usage; } if (count == -1) { db_show_ccp_sc(sc); return; } qindex = (unsigned)count; if (qindex >= nitems(sc->queues)) { db_printf("No such queue %u\n", qindex); goto usage; } db_show_ccp_qp(&sc->queues[qindex]); return; usage: db_printf("usage: show ccp [,]\n"); return; } #endif /* DDB */ Index: head/sys/dev/cxgbe/crypto/t4_crypto.c =================================================================== --- head/sys/dev/cxgbe/crypto/t4_crypto.c (revision 364798) +++ head/sys/dev/cxgbe/crypto/t4_crypto.c (revision 364799) @@ -1,2932 +1,2936 @@ /*- * Copyright (c) 2017 Chelsio Communications, Inc. * All rights reserved. * Written by: John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include "common/common.h" #include "crypto/t4_crypto.h" /* * Requests consist of: * * +-------------------------------+ * | struct fw_crypto_lookaside_wr | * +-------------------------------+ * | struct ulp_txpkt | * +-------------------------------+ * | struct ulptx_idata | * +-------------------------------+ * | struct cpl_tx_sec_pdu | * +-------------------------------+ * | struct cpl_tls_tx_scmd_fmt | * +-------------------------------+ * | key context header | * +-------------------------------+ * | AES key | ----- For requests with AES * +-------------------------------+ * | Hash state | ----- For hash-only requests * +-------------------------------+ - * | IPAD (16-byte aligned) | \ * +-------------------------------+ +---- For requests with HMAC * | OPAD (16-byte aligned) | / * +-------------------------------+ - * | GMAC H | ----- For AES-GCM * +-------------------------------+ - * | struct cpl_rx_phys_dsgl | \ * +-------------------------------+ +---- Destination buffer for * | PHYS_DSGL entries | / non-hash-only requests * +-------------------------------+ - * | 16 dummy bytes | ----- Only for HMAC/hash-only requests * +-------------------------------+ * | IV | ----- If immediate IV * +-------------------------------+ * | Payload | ----- If immediate Payload * +-------------------------------+ - * | struct ulptx_sgl | \ * +-------------------------------+ +---- If payload via SGL * | SGL entries | / * +-------------------------------+ - * * Note that the key context must be padded to ensure 16-byte alignment. * For HMAC requests, the key consists of the partial hash of the IPAD * followed by the partial hash of the OPAD. * * Replies consist of: * * +-------------------------------+ * | struct cpl_fw6_pld | * +-------------------------------+ * | hash digest | ----- For HMAC request with * +-------------------------------+ 'hash_size' set in work request * * A 32-bit big-endian error status word is supplied in the last 4 * bytes of data[0] in the CPL_FW6_PLD message. bit 0 indicates a * "MAC" error and bit 1 indicates a "PAD" error. * * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message * in the request is returned in data[1] of the CPL_FW6_PLD message. * * For block cipher replies, the updated IV is supplied in data[2] and * data[3] of the CPL_FW6_PLD message. * * For hash replies where the work request set 'hash_size' to request * a copy of the hash in the reply, the hash digest is supplied * immediately following the CPL_FW6_PLD message. */ /* * The crypto engine supports a maximum AAD size of 511 bytes. */ #define MAX_AAD_LEN 511 /* * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG * entries. While the CPL includes a 16-bit length field, the T6 can * sometimes hang if an error occurs while processing a request with a * single DSGL entry larger than 2k. 
*/ #define MAX_RX_PHYS_DSGL_SGE 32 #define DSGL_SGE_MAXLEN 2048 /* * The adapter only supports requests with a total input or output * length of 64k-1 or smaller. Longer requests either result in hung * requests or incorrect results. */ #define MAX_REQUEST_SIZE 65535 static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto"); struct ccr_session_hmac { struct auth_hash *auth_hash; int hash_len; unsigned int partial_digest_len; unsigned int auth_mode; unsigned int mk_size; char pads[CHCR_HASH_MAX_BLOCK_SIZE_128 * 2]; }; struct ccr_session_gmac { int hash_len; char ghash_h[GMAC_BLOCK_LEN]; }; struct ccr_session_ccm_mac { int hash_len; }; struct ccr_session_blkcipher { unsigned int cipher_mode; unsigned int key_len; unsigned int iv_len; __be32 key_ctx_hdr; char enckey[CHCR_AES_MAX_KEY_LEN]; char deckey[CHCR_AES_MAX_KEY_LEN]; }; struct ccr_port { struct sge_wrq *txq; struct sge_rxq *rxq; int tx_channel_id; u_int active_sessions; }; struct ccr_session { #ifdef INVARIANTS int pending; #endif enum { HASH, HMAC, BLKCIPHER, ETA, GCM, CCM } mode; struct ccr_port *port; union { struct ccr_session_hmac hmac; struct ccr_session_gmac gmac; struct ccr_session_ccm_mac ccm_mac; }; struct ccr_session_blkcipher blkcipher; struct mtx lock; /* * Pre-allocate S/G lists used when preparing a work request. * 'sg_input' contains an sglist describing the entire input * buffer for a 'struct cryptop'. 'sg_output' contains an * sglist describing the entire output buffer. 'sg_ulptx' is * used to describe the data the engine should DMA as input * via ULPTX_SGL. 'sg_dsgl' is used to describe the * destination that cipher text and a tag should be written * to. */ struct sglist *sg_input; struct sglist *sg_output; struct sglist *sg_ulptx; struct sglist *sg_dsgl; }; struct ccr_softc { struct adapter *adapter; device_t dev; uint32_t cid; struct mtx lock; bool detaching; struct ccr_port ports[MAX_NPORTS]; u_int port_mask; /* * Pre-allocate a dummy output buffer for the IV and AAD for * AEAD requests. */ char *iv_aad_buf; struct sglist *sg_iv_aad; /* Statistics. */ counter_u64_t stats_blkcipher_encrypt; counter_u64_t stats_blkcipher_decrypt; counter_u64_t stats_hash; counter_u64_t stats_hmac; counter_u64_t stats_eta_encrypt; counter_u64_t stats_eta_decrypt; counter_u64_t stats_gcm_encrypt; counter_u64_t stats_gcm_decrypt; counter_u64_t stats_ccm_encrypt; counter_u64_t stats_ccm_decrypt; counter_u64_t stats_wr_nomem; counter_u64_t stats_inflight; counter_u64_t stats_mac_error; counter_u64_t stats_pad_error; counter_u64_t stats_sglist_error; counter_u64_t stats_process_error; counter_u64_t stats_sw_fallback; }; /* * Crypto requests involve two kind of scatter/gather lists. * * Non-hash-only requests require a PHYS_DSGL that describes the * location to store the results of the encryption or decryption * operation. This SGL uses a different format (PHYS_DSGL) and should * exclude the skip bytes at the start of the data as well as any AAD * or IV. For authenticated encryption requests it should include the * destination of the hash or tag. * * The input payload may either be supplied inline as immediate data, * or via a standard ULP_TX SGL. This SGL should include AAD, * ciphertext, and the hash or tag for authenticated decryption * requests. * * These scatter/gather lists can describe different subsets of the * buffers described by the crypto operation. ccr_populate_sglist() * generates a scatter/gather list that covers an entire crypto * operation buffer that is then used to construct the other * scatter/gather lists. 
*/ static int ccr_populate_sglist(struct sglist *sg, struct crypto_buffer *cb) { int error; sglist_reset(sg); switch (cb->cb_type) { case CRYPTO_BUF_MBUF: error = sglist_append_mbuf(sg, cb->cb_mbuf); break; case CRYPTO_BUF_UIO: error = sglist_append_uio(sg, cb->cb_uio); break; case CRYPTO_BUF_CONTIG: error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len); break; + case CRYPTO_BUF_VMPAGE: + error = sglist_append_vmpages(sg, cb->cb_vm_page, + cb->cb_vm_page_len, cb->cb_vm_page_offset); + break; default: error = EINVAL; } return (error); } /* * Segments in 'sg' larger than 'maxsegsize' are counted as multiple * segments. */ static int ccr_count_sgl(struct sglist *sg, int maxsegsize) { int i, nsegs; nsegs = 0; for (i = 0; i < sg->sg_nseg; i++) nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize); return (nsegs); } /* These functions deal with PHYS_DSGL for the reply buffer. */ static inline int ccr_phys_dsgl_len(int nsegs) { int len; len = (nsegs / 8) * sizeof(struct phys_sge_pairs); if ((nsegs % 8) != 0) { len += sizeof(uint16_t) * 8; len += roundup2(nsegs % 8, 2) * sizeof(uint64_t); } return (len); } static void ccr_write_phys_dsgl(struct ccr_session *s, void *dst, int nsegs) { struct sglist *sg; struct cpl_rx_phys_dsgl *cpl; struct phys_sge_pairs *sgl; vm_paddr_t paddr; size_t seglen; u_int i, j; sg = s->sg_dsgl; cpl = dst; cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) | V_CPL_RX_PHYS_DSGL_ISRDMA(0)); cpl->pcirlxorder_to_noofsgentr = htobe32( V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) | V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) | V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) | V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs)); cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; cpl->rss_hdr_int.qid = htobe16(s->port->rxq->iq.abs_id); cpl->rss_hdr_int.hash_val = 0; sgl = (struct phys_sge_pairs *)(cpl + 1); j = 0; for (i = 0; i < sg->sg_nseg; i++) { seglen = sg->sg_segs[i].ss_len; paddr = sg->sg_segs[i].ss_paddr; do { sgl->addr[j] = htobe64(paddr); if (seglen > DSGL_SGE_MAXLEN) { sgl->len[j] = htobe16(DSGL_SGE_MAXLEN); paddr += DSGL_SGE_MAXLEN; seglen -= DSGL_SGE_MAXLEN; } else { sgl->len[j] = htobe16(seglen); seglen = 0; } j++; if (j == 8) { sgl++; j = 0; } } while (seglen != 0); } MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs); } /* These functions deal with the ULPTX_SGL for input payload. 
*/ static inline int ccr_ulptx_sgl_len(int nsegs) { u_int n; nsegs--; /* first segment is part of ulptx_sgl */ n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); return (roundup2(n, 16)); } static void ccr_write_ulptx_sgl(struct ccr_session *s, void *dst, int nsegs) { struct ulptx_sgl *usgl; struct sglist *sg; struct sglist_seg *ss; int i; sg = s->sg_ulptx; MPASS(nsegs == sg->sg_nseg); ss = &sg->sg_segs[0]; usgl = dst; usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | V_ULPTX_NSGE(nsegs)); usgl->len0 = htobe32(ss->ss_len); usgl->addr0 = htobe64(ss->ss_paddr); ss++; for (i = 0; i < sg->sg_nseg - 1; i++) { usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len); usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr); ss++; } } static bool ccr_use_imm_data(u_int transhdr_len, u_int input_len) { if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN) return (false); if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) > SGE_MAX_WR_LEN) return (false); return (true); } static void ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s, struct chcr_wr *crwr, u_int kctx_len, u_int wr_len, u_int imm_len, u_int sgl_len, u_int hash_size, struct cryptop *crp) { u_int cctx_size, idata_len; cctx_size = sizeof(struct _key_ctx) + kctx_len; crwr->wreq.op_to_cctx_size = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) | V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) | V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) | V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) | V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4)); crwr->wreq.len16_pkd = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16)); crwr->wreq.session_id = 0; crwr->wreq.rx_chid_to_rx_q_id = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(s->port->tx_channel_id) | V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) | V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) | V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) | V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) | V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) | V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(s->port->rxq->iq.abs_id)); crwr->wreq.key_addr = 0; crwr->wreq.pld_size_hash_size = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) | V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size)); crwr->wreq.cookie = htobe64((uintptr_t)crp); crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DATAMODIFY(0) | V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) | V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(s->port->rxq->iq.abs_id) | V_ULP_TXPKT_RO(1)); crwr->ulptx.len = htobe32( ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16)); crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | V_ULP_TX_SC_MORE(sgl_len != 0 ? 1 : 0)); idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len; if (imm_len % 16 != 0) idata_len -= 16 - imm_len % 16; crwr->sc_imm.len = htobe32(idata_len); } static int ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) { struct chcr_wr *crwr; struct wrqe *wr; struct auth_hash *axf; char *dst; u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len; u_int hmac_ctrl, imm_len, iopad_size; int error, sgl_nsegs, sgl_len, use_opad; /* Reject requests with too large of an input buffer. */ if (crp->crp_payload_length > MAX_REQUEST_SIZE) return (EFBIG); axf = s->hmac.auth_hash; if (s->mode == HMAC) { use_opad = 1; hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC; } else { use_opad = 0; hmac_ctrl = SCMD_HMAC_CTRL_NOP; } /* PADs must be 128-bit aligned. */ iopad_size = roundup2(s->hmac.partial_digest_len, 16); /* * The 'key' part of the context includes the aligned IPAD and * OPAD. 
*/ kctx_len = iopad_size; if (use_opad) kctx_len += iopad_size; hash_size_in_response = axf->hashsize; transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); if (crp->crp_payload_length == 0) { imm_len = axf->blocksize; sgl_nsegs = 0; sgl_len = 0; } else if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length)) { imm_len = crp->crp_payload_length; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(s->sg_ulptx); error = sglist_append_sglist(s->sg_ulptx, s->sg_input, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); sgl_nsegs = s->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len; if (wr_len > SGE_MAX_WR_LEN) return (EFBIG); wr = alloc_wrqe(wr_len, s->port->txq); if (wr == NULL) { counter_u64_add(sc->stats_wr_nomem, 1); return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, hash_size_in_response, crp); crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(0)); crwr->sec_cpl.pldlen = htobe32(crp->crp_payload_length == 0 ? axf->blocksize : crp->crp_payload_length); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_NOP) | V_SCMD_AUTH_MODE(s->hmac.auth_mode) | V_SCMD_HMAC_CTRL(hmac_ctrl)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_LAST_FRAG(0) | V_SCMD_MORE_FRAGS(crp->crp_payload_length == 0 ? 1 : 0) | V_SCMD_MAC_ONLY(1)); memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len); /* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. 
*/ kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | V_KEY_CONTEXT_OPAD_PRESENT(use_opad) | V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) | V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1)); dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES; if (crp->crp_payload_length == 0) { dst[0] = 0x80; if (s->mode == HMAC) *(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) = htobe64(axf->blocksize << 3); } else if (imm_len != 0) crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, dst); else ccr_write_ulptx_sgl(s, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); return (0); } static int ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { uint8_t hash[HASH_MAX_LEN]; if (error) return (error); if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len, hash); if (timingsafe_bcmp((cpl + 1), hash, s->hmac.hash_len) != 0) return (EBADMSG); } else crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len, (cpl + 1)); return (0); } static int ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) { char iv[CHCR_MAX_CRYPTO_IV_LEN]; struct chcr_wr *crwr; struct wrqe *wr; char *dst; u_int kctx_len, key_half, op_type, transhdr_len, wr_len; u_int imm_len, iv_len; int dsgl_nsegs, dsgl_len; int sgl_nsegs, sgl_len; int error; if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0) return (EINVAL); if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC && (crp->crp_payload_length % AES_BLOCK_LEN) != 0) return (EINVAL); /* Reject requests with too large of an input buffer. */ if (crp->crp_payload_length > MAX_REQUEST_SIZE) return (EFBIG); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) op_type = CHCR_ENCRYPT_OP; else op_type = CHCR_DECRYPT_OP; sglist_reset(s->sg_dsgl); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) error = sglist_append_sglist(s->sg_dsgl, s->sg_output, crp->crp_payload_output_start, crp->crp_payload_length); else error = sglist_append_sglist(s->sg_dsgl, s->sg_input, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN); if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) return (EFBIG); dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); /* The 'key' must be 128-bit aligned. */ kctx_len = roundup2(s->blkcipher.key_len, 16); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); /* For AES-XTS we send a 16-byte IV in the work request. */ if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) iv_len = AES_BLOCK_LEN; else iv_len = s->blkcipher.iv_len; if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length + iv_len)) { imm_len = crp->crp_payload_length; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(s->sg_ulptx); error = sglist_append_sglist(s->sg_ulptx, s->sg_input, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); sgl_nsegs = s->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + sgl_len; if (wr_len > SGE_MAX_WR_LEN) return (EFBIG); wr = alloc_wrqe(wr_len, s->port->txq); if (wr == NULL) { counter_u64_add(sc->stats_wr_nomem, 1); return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); crypto_read_iv(crp, iv); /* Zero the remainder of the IV for AES-XTS. 
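 * For AES-XTS, OCF supplies an 8-byte tweak (AES_XTS_IV_LEN) while the
 * work request always carries a full 16-byte IV, so the upper bytes
 * are cleared here.  For CBC and CTR, iv_len equals
 * s->blkcipher.iv_len and this memset is a no-op.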
*/ memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len); ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, crp); crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); crwr->sec_cpl.pldlen = htobe32(iv_len + crp->crp_payload_length); crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTART(iv_len + 1) | V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | V_SCMD_ENC_DEC_CTRL(op_type) | V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) | V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) | V_SCMD_IV_SIZE(iv_len / 2) | V_SCMD_NUM_IVS(0)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_IV_GEN_CTRL(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len)); crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; switch (s->blkcipher.cipher_mode) { case SCMD_CIPH_MODE_AES_CBC: if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); else memcpy(crwr->key_ctx.key, s->blkcipher.deckey, s->blkcipher.key_len); break; case SCMD_CIPH_MODE_AES_CTR: memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); break; case SCMD_CIPH_MODE_AES_XTS: key_half = s->blkcipher.key_len / 2; memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, key_half); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) memcpy(crwr->key_ctx.key + key_half, s->blkcipher.enckey, key_half); else memcpy(crwr->key_ctx.key + key_half, s->blkcipher.deckey, key_half); break; } dst = (char *)(crwr + 1) + kctx_len; ccr_write_phys_dsgl(s, dst, dsgl_nsegs); dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; memcpy(dst, iv, iv_len); dst += iv_len; if (imm_len != 0) crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, dst); else ccr_write_ulptx_sgl(s, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); explicit_bzero(iv, sizeof(iv)); return (0); } static int ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { /* * The updated IV to permit chained requests is at * cpl->data[2], but OCF doesn't permit chained requests. */ return (error); } /* * 'hashsize' is the length of a full digest. 'authsize' is the * requested digest length for this operation which may be less * than 'hashsize'. 
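 * For illustration, with SHA2-256 HMAC (hashsize 32) the mapping below
 * is: authsize 32 -> NO_TRUNC, 16 -> DIV2, 12 -> IPSEC_96BIT, and
 * 10 -> TRUNC_RFC4366; any other length also falls back to NO_TRUNC.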
*/ static int ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize) { if (authsize == 10) return (SCMD_HMAC_CTRL_TRUNC_RFC4366); if (authsize == 12) return (SCMD_HMAC_CTRL_IPSEC_96BIT); if (authsize == hashsize / 2) return (SCMD_HMAC_CTRL_DIV2); return (SCMD_HMAC_CTRL_NO_TRUNC); } static int ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) { char iv[CHCR_MAX_CRYPTO_IV_LEN]; struct chcr_wr *crwr; struct wrqe *wr; struct auth_hash *axf; char *dst; u_int kctx_len, key_half, op_type, transhdr_len, wr_len; u_int hash_size_in_response, imm_len, iopad_size, iv_len; u_int aad_start, aad_stop; u_int auth_insert; u_int cipher_start, cipher_stop; u_int hmac_ctrl, input_len; int dsgl_nsegs, dsgl_len; int sgl_nsegs, sgl_len; int error; /* * If there is a need in the future, requests with an empty * payload could be supported as HMAC-only requests. */ if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0) return (EINVAL); if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC && (crp->crp_payload_length % AES_BLOCK_LEN) != 0) return (EINVAL); /* For AES-XTS we send a 16-byte IV in the work request. */ if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) iv_len = AES_BLOCK_LEN; else iv_len = s->blkcipher.iv_len; if (crp->crp_aad_length + iv_len > MAX_AAD_LEN) return (EINVAL); axf = s->hmac.auth_hash; hash_size_in_response = s->hmac.hash_len; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) op_type = CHCR_ENCRYPT_OP; else op_type = CHCR_DECRYPT_OP; /* * The output buffer consists of the cipher text followed by * the hash when encrypting. For decryption it only contains * the plain text. * * Due to a firmware bug, the output buffer must include a * dummy output buffer for the IV and AAD prior to the real * output buffer. */ if (op_type == CHCR_ENCRYPT_OP) { if (iv_len + crp->crp_aad_length + crp->crp_payload_length + hash_size_in_response > MAX_REQUEST_SIZE) return (EFBIG); } else { if (iv_len + crp->crp_aad_length + crp->crp_payload_length > MAX_REQUEST_SIZE) return (EFBIG); } sglist_reset(s->sg_dsgl); error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len + crp->crp_aad_length); if (error) return (error); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) error = sglist_append_sglist(s->sg_dsgl, s->sg_output, crp->crp_payload_output_start, crp->crp_payload_length); else error = sglist_append_sglist(s->sg_dsgl, s->sg_input, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_ENCRYPT_OP) { if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) error = sglist_append_sglist(s->sg_dsgl, s->sg_output, crp->crp_digest_start, hash_size_in_response); else error = sglist_append_sglist(s->sg_dsgl, s->sg_input, crp->crp_digest_start, hash_size_in_response); if (error) return (error); } dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN); if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) return (EFBIG); dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); /* PADs must be 128-bit aligned. */ iopad_size = roundup2(s->hmac.partial_digest_len, 16); /* * The 'key' part of the key context consists of the key followed * by the IPAD and OPAD. */ kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); /* * The input buffer consists of the IV, any AAD, and then the * cipher/plain text. For decryption requests the hash is * appended after the cipher text. * * The IV is always stored at the start of the input buffer * even though it may be duplicated in the payload. 
The * crypto engine doesn't work properly if the IV offset points * inside of the AAD region, so a second copy is always * required. */ input_len = crp->crp_aad_length + crp->crp_payload_length; /* * The firmware hangs if sent a request which is a * bit smaller than MAX_REQUEST_SIZE. In particular, the * firmware appears to require 512 - 16 bytes of spare room * along with the size of the hash even if the hash isn't * included in the input buffer. */ if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) > MAX_REQUEST_SIZE) return (EFBIG); if (op_type == CHCR_DECRYPT_OP) input_len += hash_size_in_response; if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { imm_len = input_len; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(s->sg_ulptx); if (crp->crp_aad_length != 0) { if (crp->crp_aad != NULL) error = sglist_append(s->sg_ulptx, crp->crp_aad, crp->crp_aad_length); else error = sglist_append_sglist(s->sg_ulptx, s->sg_input, crp->crp_aad_start, crp->crp_aad_length); if (error) return (error); } error = sglist_append_sglist(s->sg_ulptx, s->sg_input, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_DECRYPT_OP) { error = sglist_append_sglist(s->sg_ulptx, s->sg_input, crp->crp_digest_start, hash_size_in_response); if (error) return (error); } sgl_nsegs = s->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } /* Any AAD comes after the IV. */ if (crp->crp_aad_length != 0) { aad_start = iv_len + 1; aad_stop = aad_start + crp->crp_aad_length - 1; } else { aad_start = 0; aad_stop = 0; } cipher_start = iv_len + crp->crp_aad_length + 1; if (op_type == CHCR_DECRYPT_OP) cipher_stop = hash_size_in_response; else cipher_stop = 0; if (op_type == CHCR_DECRYPT_OP) auth_insert = hash_size_in_response; else auth_insert = 0; wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + sgl_len; if (wr_len > SGE_MAX_WR_LEN) return (EFBIG); wr = alloc_wrqe(wr_len, s->port->txq); if (wr == NULL) { counter_u64_add(sc->stats_wr_nomem, 1); return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); crypto_read_iv(crp, iv); /* Zero the remainder of the IV for AES-XTS. */ memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len); ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp); crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( V_CPL_TX_SEC_PDU_AADSTART(aad_start) | V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4)); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) | V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response); crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | V_SCMD_ENC_DEC_CTRL(op_type) | V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 
1 : 0) | V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | V_SCMD_AUTH_MODE(s->hmac.auth_mode) | V_SCMD_HMAC_CTRL(hmac_ctrl) | V_SCMD_IV_SIZE(iv_len / 2) | V_SCMD_NUM_IVS(0)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_IV_GEN_CTRL(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; switch (s->blkcipher.cipher_mode) { case SCMD_CIPH_MODE_AES_CBC: if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); else memcpy(crwr->key_ctx.key, s->blkcipher.deckey, s->blkcipher.key_len); break; case SCMD_CIPH_MODE_AES_CTR: memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); break; case SCMD_CIPH_MODE_AES_XTS: key_half = s->blkcipher.key_len / 2; memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, key_half); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) memcpy(crwr->key_ctx.key + key_half, s->blkcipher.enckey, key_half); else memcpy(crwr->key_ctx.key + key_half, s->blkcipher.deckey, key_half); break; } dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); memcpy(dst, s->hmac.pads, iopad_size * 2); dst = (char *)(crwr + 1) + kctx_len; ccr_write_phys_dsgl(s, dst, dsgl_nsegs); dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; memcpy(dst, iv, iv_len); dst += iv_len; if (imm_len != 0) { if (crp->crp_aad_length != 0) { if (crp->crp_aad != NULL) memcpy(dst, crp->crp_aad, crp->crp_aad_length); else crypto_copydata(crp, crp->crp_aad_start, crp->crp_aad_length, dst); dst += crp->crp_aad_length; } crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, dst); dst += crp->crp_payload_length; if (op_type == CHCR_DECRYPT_OP) crypto_copydata(crp, crp->crp_digest_start, hash_size_in_response, dst); } else ccr_write_ulptx_sgl(s, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); explicit_bzero(iv, sizeof(iv)); return (0); } static int ccr_eta_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { /* * The updated IV to permit chained requests is at * cpl->data[2], but OCF doesn't permit chained requests. */ return (error); } static int ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) { char iv[CHCR_MAX_CRYPTO_IV_LEN]; struct chcr_wr *crwr; struct wrqe *wr; char *dst; u_int iv_len, kctx_len, op_type, transhdr_len, wr_len; u_int hash_size_in_response, imm_len; u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert; u_int hmac_ctrl, input_len; int dsgl_nsegs, dsgl_len; int sgl_nsegs, sgl_len; int error; if (s->blkcipher.key_len == 0) return (EINVAL); /* * The crypto engine doesn't handle GCM requests with an empty * payload, so handle those in software instead. */ if (crp->crp_payload_length == 0) return (EMSGSIZE); if (crp->crp_aad_length + AES_BLOCK_LEN > MAX_AAD_LEN) return (EMSGSIZE); hash_size_in_response = s->gmac.hash_len; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) op_type = CHCR_ENCRYPT_OP; else op_type = CHCR_DECRYPT_OP; /* * The IV handling for GCM in OCF is a bit more complicated in * that IPSec provides a full 16-byte IV (including the * counter), whereas the /dev/crypto interface sometimes * provides a full 16-byte IV (if no IV is provided in the * ioctl) and sometimes a 12-byte IV (if the IV was explicit). * * When provided a 12-byte IV, assume the IV is really 16 bytes * with a counter in the last 4 bytes initialized to 1. 
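 *
 * This mirrors the standard GCM construction for 96-bit IVs, where the
 * initial counter block J0 is the IV followed by a big-endian 1; the
 * code below appends that 32-bit 1 whenever a 12-byte IV is used.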
* * While iv_len is checked below, the value is currently * always set to 12 when creating a GCM session in this driver * due to limitations in OCF (there is no way to know what the * IV length of a given request will be). This means that the * driver always assumes as 12-byte IV for now. */ if (s->blkcipher.iv_len == 12) iv_len = AES_BLOCK_LEN; else iv_len = s->blkcipher.iv_len; /* * GCM requests should always provide an explicit IV. */ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); /* * The output buffer consists of the cipher text followed by * the tag when encrypting. For decryption it only contains * the plain text. * * Due to a firmware bug, the output buffer must include a * dummy output buffer for the IV and AAD prior to the real * output buffer. */ if (op_type == CHCR_ENCRYPT_OP) { if (iv_len + crp->crp_aad_length + crp->crp_payload_length + hash_size_in_response > MAX_REQUEST_SIZE) return (EFBIG); } else { if (iv_len + crp->crp_aad_length + crp->crp_payload_length > MAX_REQUEST_SIZE) return (EFBIG); } sglist_reset(s->sg_dsgl); error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len + crp->crp_aad_length); if (error) return (error); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) error = sglist_append_sglist(s->sg_dsgl, s->sg_output, crp->crp_payload_output_start, crp->crp_payload_length); else error = sglist_append_sglist(s->sg_dsgl, s->sg_input, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_ENCRYPT_OP) { if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) error = sglist_append_sglist(s->sg_dsgl, s->sg_output, crp->crp_digest_start, hash_size_in_response); else error = sglist_append_sglist(s->sg_dsgl, s->sg_input, crp->crp_digest_start, hash_size_in_response); if (error) return (error); } dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN); if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) return (EFBIG); dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); /* * The 'key' part of the key context consists of the key followed * by the Galois hash key. */ kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); /* * The input buffer consists of the IV, any AAD, and then the * cipher/plain text. For decryption requests the hash is * appended after the cipher text. * * The IV is always stored at the start of the input buffer * even though it may be duplicated in the payload. The * crypto engine doesn't work properly if the IV offset points * inside of the AAD region, so a second copy is always * required. 
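 *
 * Roughly, the input placed in the work request is laid out as:
 *   [16-byte IV] [AAD] [plain/cipher text] [tag (decrypt only)]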
*/ input_len = crp->crp_aad_length + crp->crp_payload_length; if (op_type == CHCR_DECRYPT_OP) input_len += hash_size_in_response; if (input_len > MAX_REQUEST_SIZE) return (EFBIG); if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { imm_len = input_len; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(s->sg_ulptx); if (crp->crp_aad_length != 0) { if (crp->crp_aad != NULL) error = sglist_append(s->sg_ulptx, crp->crp_aad, crp->crp_aad_length); else error = sglist_append_sglist(s->sg_ulptx, s->sg_input, crp->crp_aad_start, crp->crp_aad_length); if (error) return (error); } error = sglist_append_sglist(s->sg_ulptx, s->sg_input, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_DECRYPT_OP) { error = sglist_append_sglist(s->sg_ulptx, s->sg_input, crp->crp_digest_start, hash_size_in_response); if (error) return (error); } sgl_nsegs = s->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } if (crp->crp_aad_length != 0) { aad_start = iv_len + 1; aad_stop = aad_start + crp->crp_aad_length - 1; } else { aad_start = 0; aad_stop = 0; } cipher_start = iv_len + crp->crp_aad_length + 1; if (op_type == CHCR_DECRYPT_OP) cipher_stop = hash_size_in_response; else cipher_stop = 0; if (op_type == CHCR_DECRYPT_OP) auth_insert = hash_size_in_response; else auth_insert = 0; wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + sgl_len; if (wr_len > SGE_MAX_WR_LEN) return (EFBIG); wr = alloc_wrqe(wr_len, s->port->txq); if (wr == NULL) { counter_u64_add(sc->stats_wr_nomem, 1); return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); memcpy(iv, crp->crp_iv, s->blkcipher.iv_len); if (s->blkcipher.iv_len == 12) *(uint32_t *)&iv[12] = htobe32(1); ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, crp); crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); /* * NB: cipherstop is explicitly set to 0. On encrypt it * should normally be set to 0 anyway. However, for decrypt * the cipher ends before the tag in the ETA case (and * authstop is set to stop before the tag), but for GCM the * cipher still runs to the end of the buffer. Not sure if * this is intentional or a firmware quirk, but it is required * for working tag validation with GCM decryption. */ crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( V_CPL_TX_SEC_PDU_AADSTART(aad_start) | V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) | V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response); crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | V_SCMD_ENC_DEC_CTRL(op_type) | V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 
1 : 0) | V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_GCM) | V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_GHASH) | V_SCMD_HMAC_CTRL(hmac_ctrl) | V_SCMD_IV_SIZE(iv_len / 2) | V_SCMD_NUM_IVS(0)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_IV_GEN_CTRL(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN); dst = (char *)(crwr + 1) + kctx_len; ccr_write_phys_dsgl(s, dst, dsgl_nsegs); dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; memcpy(dst, iv, iv_len); dst += iv_len; if (imm_len != 0) { if (crp->crp_aad_length != 0) { if (crp->crp_aad != NULL) memcpy(dst, crp->crp_aad, crp->crp_aad_length); else crypto_copydata(crp, crp->crp_aad_start, crp->crp_aad_length, dst); dst += crp->crp_aad_length; } crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, dst); dst += crp->crp_payload_length; if (op_type == CHCR_DECRYPT_OP) crypto_copydata(crp, crp->crp_digest_start, hash_size_in_response, dst); } else ccr_write_ulptx_sgl(s, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); explicit_bzero(iv, sizeof(iv)); return (0); } static int ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { /* * The updated IV to permit chained requests is at * cpl->data[2], but OCF doesn't permit chained requests. * * Note that the hardware should always verify the GMAC hash. */ return (error); } /* * Handle a GCM request that is not supported by the crypto engine by * performing the operation in software. Derived from swcr_authenc(). */ static void ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp) { struct auth_hash *axf; struct enc_xform *exf; void *auth_ctx, *kschedule; char block[GMAC_BLOCK_LEN]; char digest[GMAC_DIGEST_LEN]; char iv[AES_BLOCK_LEN]; int error, i, len; auth_ctx = NULL; kschedule = NULL; /* Initialize the MAC. */ switch (s->blkcipher.key_len) { case 16: axf = &auth_hash_nist_gmac_aes_128; break; case 24: axf = &auth_hash_nist_gmac_aes_192; break; case 32: axf = &auth_hash_nist_gmac_aes_256; break; default: error = EINVAL; goto out; } auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT); if (auth_ctx == NULL) { error = ENOMEM; goto out; } axf->Init(auth_ctx); axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len); /* Initialize the cipher. */ exf = &enc_xform_aes_nist_gcm; kschedule = malloc(exf->ctxsize, M_CCR, M_NOWAIT); if (kschedule == NULL) { error = ENOMEM; goto out; } error = exf->setkey(kschedule, s->blkcipher.enckey, s->blkcipher.key_len); if (error) goto out; /* * This assumes a 12-byte IV from the crp. See longer comment * above in ccr_gcm() for more details. */ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) { error = EINVAL; goto out; } memcpy(iv, crp->crp_iv, 12); *(uint32_t *)&iv[12] = htobe32(1); axf->Reinit(auth_ctx, iv, sizeof(iv)); /* MAC the AAD. 
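 * The AAD is fed to GHASH in GMAC_BLOCK_LEN (16-byte) blocks, with any
 * trailing partial block zero-padded, matching GHASH's block
 * structure.  The two loops below handle the separate-AAD and
 * in-buffer cases.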
*/ if (crp->crp_aad != NULL) { len = rounddown(crp->crp_aad_length, sizeof(block)); if (len != 0) axf->Update(auth_ctx, crp->crp_aad, len); if (crp->crp_aad_length != len) { memset(block, 0, sizeof(block)); memcpy(block, (char *)crp->crp_aad + len, crp->crp_aad_length - len); axf->Update(auth_ctx, block, sizeof(block)); } } else { for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) { len = imin(crp->crp_aad_length - i, sizeof(block)); crypto_copydata(crp, crp->crp_aad_start + i, len, block); bzero(block + len, sizeof(block) - len); axf->Update(auth_ctx, block, sizeof(block)); } } exf->reinit(kschedule, iv); /* Do encryption with MAC */ for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { len = imin(crp->crp_payload_length - i, sizeof(block)); crypto_copydata(crp, crp->crp_payload_start + i, len, block); bzero(block + len, sizeof(block) - len); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { exf->encrypt(kschedule, block, block); axf->Update(auth_ctx, block, len); crypto_copyback(crp, crp->crp_payload_start + i, len, block); } else { axf->Update(auth_ctx, block, len); } } /* Length block. */ bzero(block, sizeof(block)); ((uint32_t *)block)[1] = htobe32(crp->crp_aad_length * 8); ((uint32_t *)block)[3] = htobe32(crp->crp_payload_length * 8); axf->Update(auth_ctx, block, sizeof(block)); /* Finalize MAC. */ axf->Final(digest, auth_ctx); /* Inject or validate tag. */ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { crypto_copyback(crp, crp->crp_digest_start, sizeof(digest), digest); error = 0; } else { char digest2[GMAC_DIGEST_LEN]; crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2), digest2); if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) { error = 0; /* Tag matches, decrypt data. */ for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { len = imin(crp->crp_payload_length - i, sizeof(block)); crypto_copydata(crp, crp->crp_payload_start + i, len, block); bzero(block + len, sizeof(block) - len); exf->decrypt(kschedule, block, block); crypto_copyback(crp, crp->crp_payload_start + i, len, block); } } else error = EBADMSG; explicit_bzero(digest2, sizeof(digest2)); } out: zfree(kschedule, M_CCR); zfree(auth_ctx, M_CCR); explicit_bzero(block, sizeof(block)); explicit_bzero(iv, sizeof(iv)); explicit_bzero(digest, sizeof(digest)); crp->crp_etype = error; crypto_done(crp); } static void generate_ccm_b0(struct cryptop *crp, u_int hash_size_in_response, const char *iv, char *b0) { u_int i, payload_len; /* NB: L is already set in the first byte of the IV. */ memcpy(b0, iv, CCM_B0_SIZE); /* Set length of hash in bits 3 - 5. */ b0[0] |= (((hash_size_in_response - 2) / 2) << 3); /* Store the payload length as a big-endian value. */ payload_len = crp->crp_payload_length; for (i = 0; i < iv[0]; i++) { b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len; payload_len >>= 8; } /* * If there is AAD in the request, set bit 6 in the flags * field and store the AAD length as a big-endian value at the * start of block 1. This only assumes a 16-bit AAD length * since T6 doesn't support large AAD sizes. 
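 * This matches the CCM encoding in RFC 3610: an AAD length below
 * 0xFF00 is stored as a 2-octet big-endian value at the start of the
 * AAD region, which is what the htobe16() store below writes.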
*/ if (crp->crp_aad_length != 0) { b0[0] |= (1 << 6); *(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crp->crp_aad_length); } } static int ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) { char iv[CHCR_MAX_CRYPTO_IV_LEN]; struct ulptx_idata *idata; struct chcr_wr *crwr; struct wrqe *wr; char *dst; u_int iv_len, kctx_len, op_type, transhdr_len, wr_len; u_int aad_len, b0_len, hash_size_in_response, imm_len; u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert; u_int hmac_ctrl, input_len; int dsgl_nsegs, dsgl_len; int sgl_nsegs, sgl_len; int error; if (s->blkcipher.key_len == 0) return (EINVAL); /* * The crypto engine doesn't handle CCM requests with an empty * payload, so handle those in software instead. */ if (crp->crp_payload_length == 0) return (EMSGSIZE); /* * CCM always includes block 0 in the AAD before AAD from the * request. */ b0_len = CCM_B0_SIZE; if (crp->crp_aad_length != 0) b0_len += CCM_AAD_FIELD_SIZE; aad_len = b0_len + crp->crp_aad_length; /* * CCM requests should always provide an explicit IV (really * the nonce). */ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); /* * Always assume a 12 byte input nonce for now since that is * what OCF always generates. The full IV in the work request * is 16 bytes. */ iv_len = AES_BLOCK_LEN; if (iv_len + aad_len > MAX_AAD_LEN) return (EMSGSIZE); hash_size_in_response = s->ccm_mac.hash_len; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) op_type = CHCR_ENCRYPT_OP; else op_type = CHCR_DECRYPT_OP; /* * The output buffer consists of the cipher text followed by * the tag when encrypting. For decryption it only contains * the plain text. * * Due to a firmware bug, the output buffer must include a * dummy output buffer for the IV and AAD prior to the real * output buffer. */ if (op_type == CHCR_ENCRYPT_OP) { if (iv_len + aad_len + crp->crp_payload_length + hash_size_in_response > MAX_REQUEST_SIZE) return (EFBIG); } else { if (iv_len + aad_len + crp->crp_payload_length > MAX_REQUEST_SIZE) return (EFBIG); } sglist_reset(s->sg_dsgl); error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len + aad_len); if (error) return (error); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) error = sglist_append_sglist(s->sg_dsgl, s->sg_output, crp->crp_payload_output_start, crp->crp_payload_length); else error = sglist_append_sglist(s->sg_dsgl, s->sg_input, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_ENCRYPT_OP) { if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) error = sglist_append_sglist(s->sg_dsgl, s->sg_output, crp->crp_digest_start, hash_size_in_response); else error = sglist_append_sglist(s->sg_dsgl, s->sg_input, crp->crp_digest_start, hash_size_in_response); if (error) return (error); } dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN); if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) return (EFBIG); dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); /* * The 'key' part of the key context consists of two copies of * the AES key. */ kctx_len = roundup2(s->blkcipher.key_len, 16) * 2; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); /* * The input buffer consists of the IV, AAD (including block * 0), and then the cipher/plain text. For decryption * requests the hash is appended after the cipher text. * * The IV is always stored at the start of the input buffer * even though it may be duplicated in the payload. The * crypto engine doesn't work properly if the IV offset points * inside of the AAD region, so a second copy is always * required. 
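 *
 * Roughly, the input placed in the work request is laid out as:
 *   [16-byte IV] [B0] [AAD length + AAD] [payload] [MAC (decrypt only)]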
*/ input_len = aad_len + crp->crp_payload_length; if (op_type == CHCR_DECRYPT_OP) input_len += hash_size_in_response; if (input_len > MAX_REQUEST_SIZE) return (EFBIG); if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { imm_len = input_len; sgl_nsegs = 0; sgl_len = 0; } else { /* Block 0 is passed as immediate data. */ imm_len = b0_len; sglist_reset(s->sg_ulptx); if (crp->crp_aad_length != 0) { if (crp->crp_aad != NULL) error = sglist_append(s->sg_ulptx, crp->crp_aad, crp->crp_aad_length); else error = sglist_append_sglist(s->sg_ulptx, s->sg_input, crp->crp_aad_start, crp->crp_aad_length); if (error) return (error); } error = sglist_append_sglist(s->sg_ulptx, s->sg_input, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_DECRYPT_OP) { error = sglist_append_sglist(s->sg_ulptx, s->sg_input, crp->crp_digest_start, hash_size_in_response); if (error) return (error); } sgl_nsegs = s->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } aad_start = iv_len + 1; aad_stop = aad_start + aad_len - 1; cipher_start = aad_stop + 1; if (op_type == CHCR_DECRYPT_OP) cipher_stop = hash_size_in_response; else cipher_stop = 0; if (op_type == CHCR_DECRYPT_OP) auth_insert = hash_size_in_response; else auth_insert = 0; wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + sgl_len; if (wr_len > SGE_MAX_WR_LEN) return (EFBIG); wr = alloc_wrqe(wr_len, s->port->txq); if (wr == NULL) { counter_u64_add(sc->stats_wr_nomem, 1); return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); /* * Read the nonce from the request. Use the nonce to generate * the full IV with the counter set to 0. */ memset(iv, 0, iv_len); iv[0] = (15 - AES_CCM_IV_LEN) - 1; memcpy(iv + 1, crp->crp_iv, AES_CCM_IV_LEN); ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, crp); crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); /* * NB: cipherstop is explicitly set to 0. See comments above * in ccr_gcm(). */ crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( V_CPL_TX_SEC_PDU_AADSTART(aad_start) | V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) | V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ hmac_ctrl = ccr_hmac_ctrl(AES_CBC_MAC_HASH_LEN, hash_size_in_response); crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | V_SCMD_ENC_DEC_CTRL(op_type) | V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 
0 : 1) | V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CCM) | V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_CBCMAC) | V_SCMD_HMAC_CTRL(hmac_ctrl) | V_SCMD_IV_SIZE(iv_len / 2) | V_SCMD_NUM_IVS(0)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_IV_GEN_CTRL(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); memcpy(crwr->key_ctx.key + roundup(s->blkcipher.key_len, 16), s->blkcipher.enckey, s->blkcipher.key_len); dst = (char *)(crwr + 1) + kctx_len; ccr_write_phys_dsgl(s, dst, dsgl_nsegs); dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; memcpy(dst, iv, iv_len); dst += iv_len; generate_ccm_b0(crp, hash_size_in_response, iv, dst); if (sgl_nsegs == 0) { dst += b0_len; if (crp->crp_aad_length != 0) { if (crp->crp_aad != NULL) memcpy(dst, crp->crp_aad, crp->crp_aad_length); else crypto_copydata(crp, crp->crp_aad_start, crp->crp_aad_length, dst); dst += crp->crp_aad_length; } crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, dst); dst += crp->crp_payload_length; if (op_type == CHCR_DECRYPT_OP) crypto_copydata(crp, crp->crp_digest_start, hash_size_in_response, dst); } else { dst += CCM_B0_SIZE; if (b0_len > CCM_B0_SIZE) { /* * If there is AAD, insert padding including a * ULP_TX_SC_NOOP so that the ULP_TX_SC_DSGL * is 16-byte aligned. */ KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE, ("b0_len mismatch")); memset(dst + CCM_AAD_FIELD_SIZE, 0, 8 - CCM_AAD_FIELD_SIZE); idata = (void *)(dst + 8); idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); idata->len = htobe32(0); dst = (void *)(idata + 1); } ccr_write_ulptx_sgl(s, dst, sgl_nsegs); } /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); explicit_bzero(iv, sizeof(iv)); return (0); } static int ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { /* * The updated IV to permit chained requests is at * cpl->data[2], but OCF doesn't permit chained requests. * * Note that the hardware should always verify the CBC MAC * hash. */ return (error); } /* * Handle a CCM request that is not supported by the crypto engine by * performing the operation in software. Derived from swcr_authenc(). */ static void ccr_ccm_soft(struct ccr_session *s, struct cryptop *crp) { struct auth_hash *axf; struct enc_xform *exf; union authctx *auth_ctx; void *kschedule; char block[CCM_CBC_BLOCK_LEN]; char digest[AES_CBC_MAC_HASH_LEN]; char iv[AES_CCM_IV_LEN]; int error, i, len; auth_ctx = NULL; kschedule = NULL; /* Initialize the MAC. */ switch (s->blkcipher.key_len) { case 16: axf = &auth_hash_ccm_cbc_mac_128; break; case 24: axf = &auth_hash_ccm_cbc_mac_192; break; case 32: axf = &auth_hash_ccm_cbc_mac_256; break; default: error = EINVAL; goto out; } auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT); if (auth_ctx == NULL) { error = ENOMEM; goto out; } axf->Init(auth_ctx); axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len); /* Initialize the cipher. 
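 * Note that, unlike GCM, the CBC-MAC context needs to know the AAD and
 * payload lengths up front (they are encoded into block B0), so
 * authDataLength and cryptDataLength are set below before Reinit().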
*/ exf = &enc_xform_ccm; kschedule = malloc(exf->ctxsize, M_CCR, M_NOWAIT); if (kschedule == NULL) { error = ENOMEM; goto out; } error = exf->setkey(kschedule, s->blkcipher.enckey, s->blkcipher.key_len); if (error) goto out; if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) { error = EINVAL; goto out; } memcpy(iv, crp->crp_iv, AES_CCM_IV_LEN); auth_ctx->aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length; auth_ctx->aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length; axf->Reinit(auth_ctx, iv, sizeof(iv)); /* MAC the AAD. */ if (crp->crp_aad != NULL) error = axf->Update(auth_ctx, crp->crp_aad, crp->crp_aad_length); else error = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, axf->Update, auth_ctx); if (error) goto out; exf->reinit(kschedule, iv); /* Do encryption/decryption with MAC */ for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { len = imin(crp->crp_payload_length - i, sizeof(block)); crypto_copydata(crp, crp->crp_payload_start + i, len, block); bzero(block + len, sizeof(block) - len); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { axf->Update(auth_ctx, block, len); exf->encrypt(kschedule, block, block); crypto_copyback(crp, crp->crp_payload_start + i, len, block); } else { exf->decrypt(kschedule, block, block); axf->Update(auth_ctx, block, len); } } /* Finalize MAC. */ axf->Final(digest, auth_ctx); /* Inject or validate tag. */ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { crypto_copyback(crp, crp->crp_digest_start, sizeof(digest), digest); error = 0; } else { char digest2[AES_CBC_MAC_HASH_LEN]; crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2), digest2); if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) { error = 0; /* Tag matches, decrypt data. */ exf->reinit(kschedule, iv); for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { len = imin(crp->crp_payload_length - i, sizeof(block)); crypto_copydata(crp, crp->crp_payload_start + i, len, block); bzero(block + len, sizeof(block) - len); exf->decrypt(kschedule, block, block); crypto_copyback(crp, crp->crp_payload_start + i, len, block); } } else error = EBADMSG; explicit_bzero(digest2, sizeof(digest2)); } out: zfree(kschedule, M_CCR); zfree(auth_ctx, M_CCR); explicit_bzero(block, sizeof(block)); explicit_bzero(iv, sizeof(iv)); explicit_bzero(digest, sizeof(digest)); crp->crp_etype = error; crypto_done(crp); } static void ccr_identify(driver_t *driver, device_t parent) { struct adapter *sc; sc = device_get_softc(parent); if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE && device_find_child(parent, "ccr", -1) == NULL) device_add_child(parent, "ccr", -1); } static int ccr_probe(device_t dev) { device_set_desc(dev, "Chelsio Crypto Accelerator"); return (BUS_PROBE_DEFAULT); } static void ccr_sysctls(struct ccr_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *oid, *port_oid; struct sysctl_oid_list *children; char buf[16]; int i; ctx = device_get_sysctl_ctx(sc->dev); /* * dev.ccr.X. */ oid = device_get_sysctl_tree(sc->dev); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "port_mask", CTLFLAG_RW, &sc->port_mask, 0, "Mask of enabled ports"); /* * dev.ccr.X.stats. 
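 * These counters can be inspected at runtime with sysctl(8), for
 * example "sysctl dev.ccr.0.stats" (unit 0 is used here purely for
 * illustration).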
*/ oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD, &sc->stats_hash, "Hash requests submitted"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD, &sc->stats_hmac, "HMAC requests submitted"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD, &sc->stats_blkcipher_encrypt, "Cipher encryption requests submitted"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD, &sc->stats_blkcipher_decrypt, "Cipher decryption requests submitted"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_encrypt", CTLFLAG_RD, &sc->stats_eta_encrypt, "Combined AES+HMAC encryption requests submitted"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_decrypt", CTLFLAG_RD, &sc->stats_eta_decrypt, "Combined AES+HMAC decryption requests submitted"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD, &sc->stats_gcm_encrypt, "AES-GCM encryption requests submitted"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD, &sc->stats_gcm_decrypt, "AES-GCM decryption requests submitted"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_encrypt", CTLFLAG_RD, &sc->stats_ccm_encrypt, "AES-CCM encryption requests submitted"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_decrypt", CTLFLAG_RD, &sc->stats_ccm_decrypt, "AES-CCM decryption requests submitted"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD, &sc->stats_wr_nomem, "Work request memory allocation failures"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD, &sc->stats_inflight, "Requests currently pending"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD, &sc->stats_mac_error, "MAC errors"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD, &sc->stats_pad_error, "Padding errors"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD, &sc->stats_sglist_error, "Requests for which DMA mapping failed"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD, &sc->stats_process_error, "Requests failed during queueing"); SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sw_fallback", CTLFLAG_RD, &sc->stats_sw_fallback, "Requests processed by falling back to software"); /* * dev.ccr.X.stats.port */ port_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "port", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Per-port statistics"); for (i = 0; i < nitems(sc->ports); i++) { if (sc->ports[i].rxq == NULL) continue; /* * dev.ccr.X.stats.port.Y */ snprintf(buf, sizeof(buf), "%d", i); oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(port_oid), OID_AUTO, buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, buf); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "active_sessions", CTLFLAG_RD, &sc->ports[i].active_sessions, 0, "Count of active sessions"); } } static void ccr_init_port(struct ccr_softc *sc, int port) { sc->ports[port].txq = &sc->adapter->sge.ctrlq[port]; sc->ports[port].rxq = &sc->adapter->sge.rxq[sc->adapter->port[port]->vi->first_rxq]; sc->ports[port].tx_channel_id = port; _Static_assert(sizeof(sc->port_mask) * NBBY >= MAX_NPORTS - 1, "Too many ports to fit in port_mask"); sc->port_mask |= 1u << port; } static int ccr_attach(device_t dev) { struct ccr_softc *sc; int32_t cid; int i; sc = device_get_softc(dev); sc->dev = dev; sc->adapter = 
device_get_softc(device_get_parent(dev)); for_each_port(sc->adapter, i) { ccr_init_port(sc, i); } cid = crypto_get_driverid(dev, sizeof(struct ccr_session), CRYPTOCAP_F_HARDWARE); if (cid < 0) { device_printf(dev, "could not get crypto driver id\n"); return (ENXIO); } sc->cid = cid; sc->adapter->ccr_softc = sc; mtx_init(&sc->lock, "ccr", NULL, MTX_DEF); sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK); sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK); sc->stats_blkcipher_encrypt = counter_u64_alloc(M_WAITOK); sc->stats_blkcipher_decrypt = counter_u64_alloc(M_WAITOK); sc->stats_hash = counter_u64_alloc(M_WAITOK); sc->stats_hmac = counter_u64_alloc(M_WAITOK); sc->stats_eta_encrypt = counter_u64_alloc(M_WAITOK); sc->stats_eta_decrypt = counter_u64_alloc(M_WAITOK); sc->stats_gcm_encrypt = counter_u64_alloc(M_WAITOK); sc->stats_gcm_decrypt = counter_u64_alloc(M_WAITOK); sc->stats_ccm_encrypt = counter_u64_alloc(M_WAITOK); sc->stats_ccm_decrypt = counter_u64_alloc(M_WAITOK); sc->stats_wr_nomem = counter_u64_alloc(M_WAITOK); sc->stats_inflight = counter_u64_alloc(M_WAITOK); sc->stats_mac_error = counter_u64_alloc(M_WAITOK); sc->stats_pad_error = counter_u64_alloc(M_WAITOK); sc->stats_sglist_error = counter_u64_alloc(M_WAITOK); sc->stats_process_error = counter_u64_alloc(M_WAITOK); sc->stats_sw_fallback = counter_u64_alloc(M_WAITOK); ccr_sysctls(sc); return (0); } static int ccr_detach(device_t dev) { struct ccr_softc *sc; sc = device_get_softc(dev); mtx_lock(&sc->lock); sc->detaching = true; mtx_unlock(&sc->lock); crypto_unregister_all(sc->cid); mtx_destroy(&sc->lock); counter_u64_free(sc->stats_blkcipher_encrypt); counter_u64_free(sc->stats_blkcipher_decrypt); counter_u64_free(sc->stats_hash); counter_u64_free(sc->stats_hmac); counter_u64_free(sc->stats_eta_encrypt); counter_u64_free(sc->stats_eta_decrypt); counter_u64_free(sc->stats_gcm_encrypt); counter_u64_free(sc->stats_gcm_decrypt); counter_u64_free(sc->stats_ccm_encrypt); counter_u64_free(sc->stats_ccm_decrypt); counter_u64_free(sc->stats_wr_nomem); counter_u64_free(sc->stats_inflight); counter_u64_free(sc->stats_mac_error); counter_u64_free(sc->stats_pad_error); counter_u64_free(sc->stats_sglist_error); counter_u64_free(sc->stats_process_error); counter_u64_free(sc->stats_sw_fallback); sglist_free(sc->sg_iv_aad); free(sc->iv_aad_buf, M_CCR); sc->adapter->ccr_softc = NULL; return (0); } static void ccr_init_hash_digest(struct ccr_session *s) { union authctx auth_ctx; struct auth_hash *axf; axf = s->hmac.auth_hash; axf->Init(&auth_ctx); t4_copy_partial_hash(axf->type, &auth_ctx, s->hmac.pads); } static bool ccr_aes_check_keylen(int alg, int klen) { switch (klen * 8) { case 128: case 192: if (alg == CRYPTO_AES_XTS) return (false); break; case 256: break; case 512: if (alg != CRYPTO_AES_XTS) return (false); break; default: return (false); } return (true); } static void ccr_aes_setkey(struct ccr_session *s, const void *key, int klen) { unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size; unsigned int opad_present; if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) kbits = (klen / 2) * 8; else kbits = klen * 8; switch (kbits) { case 128: ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; break; case 192: ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; break; case 256: ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; break; default: panic("should not get here"); } s->blkcipher.key_len = klen; memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len); switch (s->blkcipher.cipher_mode) { case SCMD_CIPH_MODE_AES_CBC: case 
SCMD_CIPH_MODE_AES_XTS: t4_aes_getdeckey(s->blkcipher.deckey, key, kbits); break; } kctx_len = roundup2(s->blkcipher.key_len, 16); switch (s->mode) { case ETA: mk_size = s->hmac.mk_size; opad_present = 1; iopad_size = roundup2(s->hmac.partial_digest_len, 16); kctx_len += iopad_size * 2; break; case GCM: mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; opad_present = 0; kctx_len += GMAC_BLOCK_LEN; break; case CCM: switch (kbits) { case 128: mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; break; case 192: mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; break; case 256: mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; break; default: panic("should not get here"); } opad_present = 0; kctx_len *= 2; break; default: mk_size = CHCR_KEYCTX_NO_KEY; opad_present = 0; break; } kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | V_KEY_CONTEXT_DUAL_CK(s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) | V_KEY_CONTEXT_OPAD_PRESENT(opad_present) | V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) | V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1)); } static bool ccr_auth_supported(const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA2_384: case CRYPTO_SHA2_512: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: break; default: return (false); } return (true); } static bool ccr_cipher_supported(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (csp->csp_ivlen != AES_BLOCK_LEN) return (false); break; case CRYPTO_AES_ICM: if (csp->csp_ivlen != AES_BLOCK_LEN) return (false); break; case CRYPTO_AES_XTS: if (csp->csp_ivlen != AES_XTS_IV_LEN) return (false); break; default: return (false); } return (ccr_aes_check_keylen(csp->csp_cipher_alg, csp->csp_cipher_klen)); } static int ccr_cipher_mode(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: return (SCMD_CIPH_MODE_AES_CBC); case CRYPTO_AES_ICM: return (SCMD_CIPH_MODE_AES_CTR); case CRYPTO_AES_NIST_GCM_16: return (SCMD_CIPH_MODE_AES_GCM); case CRYPTO_AES_XTS: return (SCMD_CIPH_MODE_AES_XTS); case CRYPTO_AES_CCM_16: return (SCMD_CIPH_MODE_AES_CCM); default: return (SCMD_CIPH_MODE_NOP); } } static int ccr_probesession(device_t dev, const struct crypto_session_params *csp) { unsigned int cipher_mode; if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!ccr_auth_supported(csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!ccr_cipher_supported(csp)) return (EINVAL); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: if (csp->csp_ivlen != AES_GCM_IV_LEN) return (EINVAL); if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > AES_GMAC_HASH_LEN) return (EINVAL); break; case CRYPTO_AES_CCM_16: if (csp->csp_ivlen != AES_CCM_IV_LEN) return (EINVAL); if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > AES_CBC_MAC_HASH_LEN) return (EINVAL); break; default: return (EINVAL); } break; case CSP_MODE_ETA: if (!ccr_auth_supported(csp) || !ccr_cipher_supported(csp)) return (EINVAL); break; default: return (EINVAL); } if (csp->csp_cipher_klen != 0) { cipher_mode = ccr_cipher_mode(csp); if (cipher_mode == SCMD_CIPH_MODE_NOP) return (EINVAL); } return (CRYPTODEV_PROBE_HARDWARE); } /* * Select an available port with the lowest number 
of active sessions. */ static struct ccr_port * ccr_choose_port(struct ccr_softc *sc) { struct ccr_port *best, *p; int i; mtx_assert(&sc->lock, MA_OWNED); best = NULL; for (i = 0; i < nitems(sc->ports); i++) { p = &sc->ports[i]; /* Ignore non-existent ports. */ if (p->rxq == NULL) continue; /* * XXX: Ignore ports whose queues aren't initialized. * This is racy as the rxq can be destroyed by the * associated VI detaching. Eventually ccr should use * dedicated queues. */ if (p->rxq->iq.adapter == NULL || p->txq->adapter == NULL) continue; if ((sc->port_mask & (1u << i)) == 0) continue; if (best == NULL || p->active_sessions < best->active_sessions) best = p; } return (best); } static void ccr_delete_session(struct ccr_session *s) { sglist_free(s->sg_input); sglist_free(s->sg_output); sglist_free(s->sg_ulptx); sglist_free(s->sg_dsgl); mtx_destroy(&s->lock); } static int ccr_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct ccr_softc *sc; struct ccr_session *s; struct auth_hash *auth_hash; unsigned int auth_mode, cipher_mode, mk_size; unsigned int partial_digest_len; switch (csp->csp_auth_alg) { case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: auth_hash = &auth_hash_hmac_sha1; auth_mode = SCMD_AUTH_MODE_SHA1; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; partial_digest_len = SHA1_HASH_LEN; break; case CRYPTO_SHA2_224: case CRYPTO_SHA2_224_HMAC: auth_hash = &auth_hash_hmac_sha2_224; auth_mode = SCMD_AUTH_MODE_SHA224; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; partial_digest_len = SHA2_256_HASH_LEN; break; case CRYPTO_SHA2_256: case CRYPTO_SHA2_256_HMAC: auth_hash = &auth_hash_hmac_sha2_256; auth_mode = SCMD_AUTH_MODE_SHA256; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; partial_digest_len = SHA2_256_HASH_LEN; break; case CRYPTO_SHA2_384: case CRYPTO_SHA2_384_HMAC: auth_hash = &auth_hash_hmac_sha2_384; auth_mode = SCMD_AUTH_MODE_SHA512_384; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; partial_digest_len = SHA2_512_HASH_LEN; break; case CRYPTO_SHA2_512: case CRYPTO_SHA2_512_HMAC: auth_hash = &auth_hash_hmac_sha2_512; auth_mode = SCMD_AUTH_MODE_SHA512_512; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; partial_digest_len = SHA2_512_HASH_LEN; break; default: auth_hash = NULL; auth_mode = SCMD_AUTH_MODE_NOP; mk_size = 0; partial_digest_len = 0; break; } cipher_mode = ccr_cipher_mode(csp); #ifdef INVARIANTS switch (csp->csp_mode) { case CSP_MODE_CIPHER: if (cipher_mode == SCMD_CIPH_MODE_NOP || cipher_mode == SCMD_CIPH_MODE_AES_GCM || cipher_mode == SCMD_CIPH_MODE_AES_CCM) panic("invalid cipher algo"); break; case CSP_MODE_DIGEST: if (auth_mode == SCMD_AUTH_MODE_NOP) panic("invalid auth algo"); break; case CSP_MODE_AEAD: if (cipher_mode != SCMD_CIPH_MODE_AES_GCM && cipher_mode != SCMD_CIPH_MODE_AES_CCM) panic("invalid aead cipher algo"); if (auth_mode != SCMD_AUTH_MODE_NOP) panic("invalid aead auth aglo"); break; case CSP_MODE_ETA: if (cipher_mode == SCMD_CIPH_MODE_NOP || cipher_mode == SCMD_CIPH_MODE_AES_GCM || cipher_mode == SCMD_CIPH_MODE_AES_CCM) panic("invalid cipher algo"); if (auth_mode == SCMD_AUTH_MODE_NOP) panic("invalid auth algo"); break; default: panic("invalid csp mode"); } #endif s = crypto_get_driver_session(cses); mtx_init(&s->lock, "ccr session", NULL, MTX_DEF); s->sg_input = sglist_alloc(TX_SGL_SEGS, M_NOWAIT); s->sg_output = sglist_alloc(TX_SGL_SEGS, M_NOWAIT); s->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_NOWAIT); s->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_NOWAIT); if (s->sg_input == NULL || s->sg_output == NULL || s->sg_ulptx == NULL || s->sg_dsgl == NULL) { 
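		/* One of the sglist allocations above failed (M_NOWAIT). */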
ccr_delete_session(s); return (ENOMEM); } sc = device_get_softc(dev); mtx_lock(&sc->lock); if (sc->detaching) { mtx_unlock(&sc->lock); ccr_delete_session(s); return (ENXIO); } s->port = ccr_choose_port(sc); if (s->port == NULL) { mtx_unlock(&sc->lock); ccr_delete_session(s); return (ENXIO); } switch (csp->csp_mode) { case CSP_MODE_AEAD: if (cipher_mode == SCMD_CIPH_MODE_AES_CCM) s->mode = CCM; else s->mode = GCM; break; case CSP_MODE_ETA: s->mode = ETA; break; case CSP_MODE_DIGEST: if (csp->csp_auth_klen != 0) s->mode = HMAC; else s->mode = HASH; break; case CSP_MODE_CIPHER: s->mode = BLKCIPHER; break; } if (s->mode == GCM) { if (csp->csp_auth_mlen == 0) s->gmac.hash_len = AES_GMAC_HASH_LEN; else s->gmac.hash_len = csp->csp_auth_mlen; t4_init_gmac_hash(csp->csp_cipher_key, csp->csp_cipher_klen, s->gmac.ghash_h); } else if (s->mode == CCM) { if (csp->csp_auth_mlen == 0) s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN; else s->ccm_mac.hash_len = csp->csp_auth_mlen; } else if (auth_mode != SCMD_AUTH_MODE_NOP) { s->hmac.auth_hash = auth_hash; s->hmac.auth_mode = auth_mode; s->hmac.mk_size = mk_size; s->hmac.partial_digest_len = partial_digest_len; if (csp->csp_auth_mlen == 0) s->hmac.hash_len = auth_hash->hashsize; else s->hmac.hash_len = csp->csp_auth_mlen; if (csp->csp_auth_key != NULL) t4_init_hmac_digest(auth_hash, partial_digest_len, csp->csp_auth_key, csp->csp_auth_klen, s->hmac.pads); else ccr_init_hash_digest(s); } if (cipher_mode != SCMD_CIPH_MODE_NOP) { s->blkcipher.cipher_mode = cipher_mode; s->blkcipher.iv_len = csp->csp_ivlen; if (csp->csp_cipher_key != NULL) ccr_aes_setkey(s, csp->csp_cipher_key, csp->csp_cipher_klen); } s->port->active_sessions++; mtx_unlock(&sc->lock); return (0); } static void ccr_freesession(device_t dev, crypto_session_t cses) { struct ccr_softc *sc; struct ccr_session *s; sc = device_get_softc(dev); s = crypto_get_driver_session(cses); #ifdef INVARIANTS if (s->pending != 0) device_printf(dev, "session %p freed with %d pending requests\n", s, s->pending); #endif mtx_lock(&sc->lock); s->port->active_sessions--; mtx_unlock(&sc->lock); ccr_delete_session(s); } static int ccr_process(device_t dev, struct cryptop *crp, int hint) { const struct crypto_session_params *csp; struct ccr_softc *sc; struct ccr_session *s; int error; csp = crypto_get_params(crp->crp_session); s = crypto_get_driver_session(crp->crp_session); sc = device_get_softc(dev); mtx_lock(&s->lock); error = ccr_populate_sglist(s->sg_input, &crp->crp_buf); if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp)) error = ccr_populate_sglist(s->sg_output, &crp->crp_obuf); if (error) { counter_u64_add(sc->stats_sglist_error, 1); goto out; } switch (s->mode) { case HASH: error = ccr_hash(sc, s, crp); if (error == 0) counter_u64_add(sc->stats_hash, 1); break; case HMAC: if (crp->crp_auth_key != NULL) t4_init_hmac_digest(s->hmac.auth_hash, s->hmac.partial_digest_len, crp->crp_auth_key, csp->csp_auth_klen, s->hmac.pads); error = ccr_hash(sc, s, crp); if (error == 0) counter_u64_add(sc->stats_hmac, 1); break; case BLKCIPHER: if (crp->crp_cipher_key != NULL) ccr_aes_setkey(s, crp->crp_cipher_key, csp->csp_cipher_klen); error = ccr_blkcipher(sc, s, crp); if (error == 0) { if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) counter_u64_add(sc->stats_blkcipher_encrypt, 1); else counter_u64_add(sc->stats_blkcipher_decrypt, 1); } break; case ETA: if (crp->crp_auth_key != NULL) t4_init_hmac_digest(s->hmac.auth_hash, s->hmac.partial_digest_len, crp->crp_auth_key, csp->csp_auth_klen, s->hmac.pads); if (crp->crp_cipher_key != NULL) 
ccr_aes_setkey(s, crp->crp_cipher_key, csp->csp_cipher_klen); error = ccr_eta(sc, s, crp); if (error == 0) { if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) counter_u64_add(sc->stats_eta_encrypt, 1); else counter_u64_add(sc->stats_eta_decrypt, 1); } break; case GCM: if (crp->crp_cipher_key != NULL) { t4_init_gmac_hash(crp->crp_cipher_key, csp->csp_cipher_klen, s->gmac.ghash_h); ccr_aes_setkey(s, crp->crp_cipher_key, csp->csp_cipher_klen); } if (crp->crp_payload_length == 0) { mtx_unlock(&s->lock); ccr_gcm_soft(s, crp); return (0); } error = ccr_gcm(sc, s, crp); if (error == EMSGSIZE) { counter_u64_add(sc->stats_sw_fallback, 1); mtx_unlock(&s->lock); ccr_gcm_soft(s, crp); return (0); } if (error == 0) { if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) counter_u64_add(sc->stats_gcm_encrypt, 1); else counter_u64_add(sc->stats_gcm_decrypt, 1); } break; case CCM: if (crp->crp_cipher_key != NULL) { ccr_aes_setkey(s, crp->crp_cipher_key, csp->csp_cipher_klen); } error = ccr_ccm(sc, s, crp); if (error == EMSGSIZE) { counter_u64_add(sc->stats_sw_fallback, 1); mtx_unlock(&s->lock); ccr_ccm_soft(s, crp); return (0); } if (error == 0) { if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) counter_u64_add(sc->stats_ccm_encrypt, 1); else counter_u64_add(sc->stats_ccm_decrypt, 1); } break; } if (error == 0) { #ifdef INVARIANTS s->pending++; #endif counter_u64_add(sc->stats_inflight, 1); } else counter_u64_add(sc->stats_process_error, 1); out: mtx_unlock(&s->lock); if (error) { crp->crp_etype = error; crypto_done(crp); } return (0); } static int do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct ccr_softc *sc = iq->adapter->ccr_softc; struct ccr_session *s; const struct cpl_fw6_pld *cpl; struct cryptop *crp; uint32_t status; int error; if (m != NULL) cpl = mtod(m, const void *); else cpl = (const void *)(rss + 1); crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]); s = crypto_get_driver_session(crp->crp_session); status = be64toh(cpl->data[0]); if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status)) error = EBADMSG; else error = 0; #ifdef INVARIANTS mtx_lock(&s->lock); s->pending--; mtx_unlock(&s->lock); #endif counter_u64_add(sc->stats_inflight, -1); switch (s->mode) { case HASH: case HMAC: error = ccr_hash_done(sc, s, crp, cpl, error); break; case BLKCIPHER: error = ccr_blkcipher_done(sc, s, crp, cpl, error); break; case ETA: error = ccr_eta_done(sc, s, crp, cpl, error); break; case GCM: error = ccr_gcm_done(sc, s, crp, cpl, error); break; case CCM: error = ccr_ccm_done(sc, s, crp, cpl, error); break; } if (error == EBADMSG) { if (CHK_MAC_ERR_BIT(status)) counter_u64_add(sc->stats_mac_error, 1); if (CHK_PAD_ERR_BIT(status)) counter_u64_add(sc->stats_pad_error, 1); } crp->crp_etype = error; crypto_done(crp); m_freem(m); return (0); } static int ccr_modevent(module_t mod, int cmd, void *arg) { switch (cmd) { case MOD_LOAD: t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld); return (0); case MOD_UNLOAD: t4_register_cpl_handler(CPL_FW6_PLD, NULL); return (0); default: return (EOPNOTSUPP); } } static device_method_t ccr_methods[] = { DEVMETHOD(device_identify, ccr_identify), DEVMETHOD(device_probe, ccr_probe), DEVMETHOD(device_attach, ccr_attach), DEVMETHOD(device_detach, ccr_detach), DEVMETHOD(cryptodev_probesession, ccr_probesession), DEVMETHOD(cryptodev_newsession, ccr_newsession), DEVMETHOD(cryptodev_freesession, ccr_freesession), DEVMETHOD(cryptodev_process, ccr_process), DEVMETHOD_END }; static driver_t ccr_driver = { "ccr", ccr_methods, sizeof(struct ccr_softc) }; static devclass_t 
ccr_devclass; DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL); MODULE_VERSION(ccr, 1); MODULE_DEPEND(ccr, crypto, 1, 1, 1); MODULE_DEPEND(ccr, t6nex, 1, 1, 1); Index: head/sys/dev/sec/sec.c =================================================================== --- head/sys/dev/sec/sec.c (revision 364798) +++ head/sys/dev/sec/sec.c (revision 364799) @@ -1,1584 +1,1587 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and * 3.0 are supported. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include static int sec_probe(device_t dev); static int sec_attach(device_t dev); static int sec_detach(device_t dev); static int sec_suspend(device_t dev); static int sec_resume(device_t dev); static int sec_shutdown(device_t dev); static void sec_primary_intr(void *arg); static void sec_secondary_intr(void *arg); static int sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand, int *irid, driver_intr_t handler, const char *iname); static void sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand, int irid, const char *iname); static int sec_controller_reset(struct sec_softc *sc); static int sec_channel_reset(struct sec_softc *sc, int channel, int full); static int sec_init(struct sec_softc *sc); static int sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem, bus_size_t size); static int sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, struct cryptop *crp, bus_size_t size, struct sec_desc_map_info *sdmi); static void sec_free_dma_mem(struct sec_dma_mem *dma_mem); static void sec_enqueue(struct sec_softc *sc); static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel); static int sec_eu_channel(struct sec_softc *sc, int eu); static int sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc, u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize); static int sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize); static int sec_probesession(device_t dev, const struct crypto_session_params *csp); static int sec_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp); static int sec_process(device_t dev, struct cryptop *crp, int hint); static int sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc, const struct crypto_session_params *csp, struct cryptop *crp); static int sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc, const struct crypto_session_params *csp, struct cryptop *crp); static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr); /* AESU */ static bool sec_aesu_newsession(const struct crypto_session_params *csp); static int sec_aesu_make_desc(struct sec_softc *sc, const struct crypto_session_params *csp, struct sec_desc *desc, struct cryptop *crp); /* MDEU */ static bool sec_mdeu_can_handle(u_int alg); static int sec_mdeu_config(const struct crypto_session_params *csp, u_int *eu, u_int *mode, u_int *hashlen); static bool sec_mdeu_newsession(const struct crypto_session_params *csp); static int sec_mdeu_make_desc(struct sec_softc *sc, const struct crypto_session_params *csp, struct sec_desc *desc, struct cryptop *crp); static device_method_t sec_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sec_probe), DEVMETHOD(device_attach, sec_attach), DEVMETHOD(device_detach, sec_detach), DEVMETHOD(device_suspend, sec_suspend), DEVMETHOD(device_resume, sec_resume), DEVMETHOD(device_shutdown, sec_shutdown), /* Crypto methods */ DEVMETHOD(cryptodev_probesession, sec_probesession), DEVMETHOD(cryptodev_newsession, sec_newsession), DEVMETHOD(cryptodev_process, sec_process), DEVMETHOD_END }; static driver_t sec_driver = { "sec", sec_methods, sizeof(struct sec_softc), }; static devclass_t sec_devclass; 
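Before the driver bodies below, a minimal consumer-side sketch of the new CRYPTO_BUF_VMPAGE buffer type may help: it shows a page-array payload being described with the cb_vm_page, cb_vm_page_len and cb_vm_page_offset fields and handed to the bus_dmamap_load_crp_buffer() case added later in this patch. The helper name example_load_vmpage() is hypothetical, and the assumption that the pages are already wired and reachable through the kernel's direct map is the caller's responsibility; none of this is part of the committed revision.
/*
 * Illustrative sketch (hypothetical helper, not part of sec.c or this
 * commit): describe a wired vm_page_t array as a CRYPTO_BUF_VMPAGE
 * buffer and load it for DMA.  Assumes <sys/bus.h>, <machine/bus.h>,
 * <vm/vm.h>, <vm/vm_page.h> and <opencrypto/cryptodev.h> are included.
 */
static int
example_load_vmpage(bus_dma_tag_t tag, bus_dmamap_t map, vm_page_t *pages,
    int len, int offset, bus_dmamap_callback_t *callback, void *arg)
{
	struct crypto_buffer cbuf;

	memset(&cbuf, 0, sizeof(cbuf));
	cbuf.cb_type = CRYPTO_BUF_VMPAGE;
	cbuf.cb_vm_page = pages;		/* wired pages backing the payload */
	cbuf.cb_vm_page_len = len;		/* total payload length in bytes */
	cbuf.cb_vm_page_offset = offset;	/* data start within the first page */

	/* Dispatches to _bus_dmamap_load_ma() for this buffer type. */
	return (bus_dmamap_load_crp_buffer(tag, map, &cbuf, callback, arg,
	    BUS_DMA_NOWAIT));
}
Drivers such as sec(4) below, which size a busdma tag per buffer type, only need to add a CRYPTO_BUF_VMPAGE case to their existing cb_type switches; the busdma layer performs the per-page work.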
DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0); MODULE_DEPEND(sec, crypto, 1, 1, 1); static struct sec_eu_methods sec_eus[] = { { sec_aesu_newsession, sec_aesu_make_desc, }, { sec_mdeu_newsession, sec_mdeu_make_desc, }, { NULL, NULL } }; static inline void sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op) { /* Sync only if dma memory is valid */ if (dma_mem->dma_vaddr != NULL) bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op); } static inline void * sec_get_pointer_data(struct sec_desc *desc, u_int n) { return (desc->sd_ptr_dmem[n].dma_vaddr); } static int sec_probe(device_t dev) { struct sec_softc *sc; uint64_t id; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "fsl,sec2.0")) return (ENXIO); sc = device_get_softc(dev); sc->sc_rrid = 0; sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); id = SEC_READ(sc, SEC_ID); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres); switch (id) { case SEC_20_ID: device_set_desc(dev, "Freescale Security Engine 2.0"); sc->sc_version = 2; break; case SEC_30_ID: device_set_desc(dev, "Freescale Security Engine 3.0"); sc->sc_version = 3; break; case SEC_31_ID: device_set_desc(dev, "Freescale Security Engine 3.1"); sc->sc_version = 3; break; default: device_printf(dev, "unknown SEC ID 0x%016"PRIx64"!\n", id); return (ENXIO); } return (0); } static int sec_attach(device_t dev) { struct sec_softc *sc; struct sec_hw_lt *lt; int error = 0; int i; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_blocked = 0; sc->sc_shutdown = 0; sc->sc_cid = crypto_get_driverid(dev, sizeof(struct sec_session), CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver ID!\n"); return (ENXIO); } /* Init locks */ mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev), "SEC Controller lock", MTX_DEF); mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev), "SEC Descriptors lock", MTX_DEF); /* Allocate I/O memory for SEC registers */ sc->sc_rrid = 0; sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) { device_printf(dev, "could not allocate I/O memory!\n"); goto fail1; } sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); /* Setup interrupts */ sc->sc_pri_irid = 0; error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand, &sc->sc_pri_irid, sec_primary_intr, "primary"); if (error) goto fail2; if (sc->sc_version == 3) { sc->sc_sec_irid = 1; error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand, &sc->sc_sec_irid, sec_secondary_intr, "secondary"); if (error) goto fail3; } /* Alloc DMA memory for descriptors and link tables */ error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem), SEC_DESCRIPTORS * sizeof(struct sec_hw_desc)); if (error) goto fail4; error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem), (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt)); if (error) goto fail5; /* Fill in descriptors and link tables */ for (i = 0; i < SEC_DESCRIPTORS; i++) { sc->sc_desc[i].sd_desc = (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i; sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr + (i * sizeof(struct sec_hw_desc)); } for (i = 0; i < SEC_LT_ENTRIES + 1; i++) { sc->sc_lt[i].sl_lt = (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i; sc->sc_lt[i].sl_lt_paddr = 
sc->sc_lt_dmem.dma_paddr + (i * sizeof(struct sec_hw_lt)); } /* Last entry in link table is used to create a circle */ lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt; lt->shl_length = 0; lt->shl_r = 0; lt->shl_n = 1; lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr; /* Init descriptor and link table queues pointers */ SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES); SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES); /* Create masks for fast checks */ sc->sc_int_error_mask = 0; for (i = 0; i < SEC_CHANNELS; i++) sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i)); switch (sc->sc_version) { case 2: sc->sc_channel_idle_mask = (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) | (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) | (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) | (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S); break; case 3: sc->sc_channel_idle_mask = (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) | (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) | (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) | (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S); break; } /* Init hardware */ error = sec_init(sc); if (error) goto fail6; return (0); fail6: sec_free_dma_mem(&(sc->sc_lt_dmem)); fail5: sec_free_dma_mem(&(sc->sc_desc_dmem)); fail4: sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand, sc->sc_sec_irid, "secondary"); fail3: sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand, sc->sc_pri_irid, "primary"); fail2: bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres); fail1: mtx_destroy(&sc->sc_controller_lock); mtx_destroy(&sc->sc_descriptors_lock); return (ENXIO); } static int sec_detach(device_t dev) { struct sec_softc *sc = device_get_softc(dev); int i, error, timeout = SEC_TIMEOUT; /* Prepare driver to shutdown */ SEC_LOCK(sc, descriptors); sc->sc_shutdown = 1; SEC_UNLOCK(sc, descriptors); /* Wait until all queued processing finishes */ while (1) { SEC_LOCK(sc, descriptors); i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc); SEC_UNLOCK(sc, descriptors); if (i == 0) break; if (timeout < 0) { device_printf(dev, "queue flush timeout!\n"); /* DMA can be still active - stop it */ for (i = 0; i < SEC_CHANNELS; i++) sec_channel_reset(sc, i, 1); break; } timeout -= 1000; DELAY(1000); } /* Disable interrupts */ SEC_WRITE(sc, SEC_IER, 0); /* Unregister from OCF */ crypto_unregister_all(sc->sc_cid); /* Free DMA memory */ for (i = 0; i < SEC_DESCRIPTORS; i++) SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i])); sec_free_dma_mem(&(sc->sc_lt_dmem)); sec_free_dma_mem(&(sc->sc_desc_dmem)); /* Release interrupts */ sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand, sc->sc_pri_irid, "primary"); sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand, sc->sc_sec_irid, "secondary"); /* Release memory */ if (sc->sc_rres) { error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres); if (error) device_printf(dev, "bus_release_resource() failed for" " I/O memory, error %d\n", error); sc->sc_rres = NULL; } mtx_destroy(&sc->sc_controller_lock); mtx_destroy(&sc->sc_descriptors_lock); return (0); } static int sec_suspend(device_t dev) { return (0); } static int sec_resume(device_t dev) { return (0); } static int sec_shutdown(device_t dev) 
{ return (0); } static int sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand, int *irid, driver_intr_t handler, const char *iname) { int error; (*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid, RF_ACTIVE); if ((*ires) == NULL) { device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname); return (ENXIO); } error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET, NULL, handler, sc, ihand); if (error) { device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname); if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires)) device_printf(sc->sc_dev, "could not release %s IRQ\n", iname); (*ires) = NULL; return (error); } return (0); } static void sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand, int irid, const char *iname) { int error; if (ires == NULL) return; error = bus_teardown_intr(sc->sc_dev, ires, ihand); if (error) device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s" " IRQ, error %d\n", iname, error); error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires); if (error) device_printf(sc->sc_dev, "bus_release_resource() failed for %s" " IRQ, error %d\n", iname, error); } static void sec_primary_intr(void *arg) { struct sec_session *ses; struct sec_softc *sc = arg; struct sec_desc *desc; struct cryptop *crp; uint64_t isr; uint8_t hash[HASH_MAX_LEN]; int i, wakeup = 0; SEC_LOCK(sc, controller); /* Check for errors */ isr = SEC_READ(sc, SEC_ISR); if (isr & sc->sc_int_error_mask) { /* Check each channel for error */ for (i = 0; i < SEC_CHANNELS; i++) { if ((isr & SEC_INT_CH_ERR(i)) == 0) continue; device_printf(sc->sc_dev, "I/O error on channel %i!\n", i); /* Find and mark problematic descriptor */ desc = sec_find_desc(sc, SEC_READ(sc, SEC_CHAN_CDPR(i))); if (desc != NULL) desc->sd_error = EIO; /* Do partial channel reset */ sec_channel_reset(sc, i, 0); } } /* ACK interrupt */ SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL); SEC_UNLOCK(sc, controller); SEC_LOCK(sc, descriptors); /* Handle processed descriptors */ SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); while (SEC_QUEUED_DESC_CNT(sc) > 0) { desc = SEC_GET_QUEUED_DESC(sc); if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) { SEC_PUT_BACK_QUEUED_DESC(sc); break; } SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); crp = desc->sd_crp; crp->crp_etype = desc->sd_error; if (crp->crp_etype == 0) { ses = crypto_get_driver_session(crp->crp_session); if (ses->ss_mlen != 0) { if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, ses->ss_mlen, hash); if (timingsafe_bcmp( desc->sd_desc->shd_digest, hash, ses->ss_mlen) != 0) crp->crp_etype = EBADMSG; } else crypto_copyback(crp, crp->crp_digest_start, ses->ss_mlen, desc->sd_desc->shd_digest); } } crypto_done(desc->sd_crp); SEC_DESC_FREE_POINTERS(desc); SEC_DESC_FREE_LT(sc, desc); SEC_DESC_QUEUED2FREE(sc); } SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (!sc->sc_shutdown) { wakeup = sc->sc_blocked; sc->sc_blocked = 0; } SEC_UNLOCK(sc, descriptors); /* Enqueue ready descriptors in hardware */ sec_enqueue(sc); if (wakeup) crypto_unblock(sc->sc_cid, wakeup); } static void sec_secondary_intr(void *arg) { struct sec_softc *sc = arg; device_printf(sc->sc_dev, "spurious secondary interrupt!\n"); sec_primary_intr(arg); } static int sec_controller_reset(struct sec_softc *sc) { int timeout = SEC_TIMEOUT; /* Reset Controller */ SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR); while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) 
{ DELAY(1000); timeout -= 1000; if (timeout < 0) { device_printf(sc->sc_dev, "timeout while waiting for " "device reset!\n"); return (ETIMEDOUT); } } return (0); } static int sec_channel_reset(struct sec_softc *sc, int channel, int full) { int timeout = SEC_TIMEOUT; uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON; uint64_t reg; /* Reset Channel */ reg = SEC_READ(sc, SEC_CHAN_CCR(channel)); SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit); while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) { DELAY(1000); timeout -= 1000; if (timeout < 0) { device_printf(sc->sc_dev, "timeout while waiting for " "channel reset!\n"); return (ETIMEDOUT); } } if (full) { reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS; switch(sc->sc_version) { case 2: reg |= SEC_CHAN_CCR_CDWE; break; case 3: reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN; break; } SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg); } return (0); } static int sec_init(struct sec_softc *sc) { uint64_t reg; int error, i; /* Reset controller twice to clear all pending interrupts */ error = sec_controller_reset(sc); if (error) return (error); error = sec_controller_reset(sc); if (error) return (error); /* Reset channels */ for (i = 0; i < SEC_CHANNELS; i++) { error = sec_channel_reset(sc, i, 1); if (error) return (error); } /* Enable Interrupts */ reg = SEC_INT_ITO; for (i = 0; i < SEC_CHANNELS; i++) reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i); SEC_WRITE(sc, SEC_IER, reg); return (error); } static void sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct sec_dma_mem *dma_mem = arg; if (error) return; KASSERT(nseg == 1, ("Wrong number of segments, should be 1")); dma_mem->dma_paddr = segs->ds_addr; } static void sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct sec_desc_map_info *sdmi = arg; struct sec_softc *sc = sdmi->sdmi_sc; struct sec_lt *lt = NULL; bus_addr_t addr; bus_size_t size; int i; SEC_LOCK_ASSERT(sc, descriptors); if (error) return; for (i = 0; i < nseg; i++) { addr = segs[i].ds_addr; size = segs[i].ds_len; /* Skip requested offset */ if (sdmi->sdmi_offset >= size) { sdmi->sdmi_offset -= size; continue; } addr += sdmi->sdmi_offset; size -= sdmi->sdmi_offset; sdmi->sdmi_offset = 0; /* Do not link more than requested */ if (sdmi->sdmi_size < size) size = sdmi->sdmi_size; lt = SEC_ALLOC_LT_ENTRY(sc); lt->sl_lt->shl_length = size; lt->sl_lt->shl_r = 0; lt->sl_lt->shl_n = 0; lt->sl_lt->shl_ptr = addr; if (sdmi->sdmi_lt_first == NULL) sdmi->sdmi_lt_first = lt; sdmi->sdmi_lt_used += 1; if ((sdmi->sdmi_size -= size) == 0) break; } sdmi->sdmi_lt_last = lt; } static int sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem, bus_size_t size) { int error; if (dma_mem->dma_vaddr != NULL) return (EBUSY); error = bus_dma_tag_create(NULL, /* parent */ SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ size, 1, /* maxsize, nsegments */ size, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &(dma_mem->dma_tag)); /* dmat */ if (error) { device_printf(sc->sc_dev, "failed to allocate busdma tag, error" " %i!\n", error); goto err1; } error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr), BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map)); if (error) { device_printf(sc->sc_dev, "failed to allocate DMA safe" " memory, error %i!\n", error); goto err2; } error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map, dma_mem->dma_vaddr, 
size, sec_alloc_dma_mem_cb, dma_mem, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "cannot get address of the DMA" " memory, error %i\n", error); goto err3; } dma_mem->dma_is_map = 0; return (0); err3: bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map); err2: bus_dma_tag_destroy(dma_mem->dma_tag); err1: dma_mem->dma_vaddr = NULL; return(error); } static int sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, struct cryptop *crp, bus_size_t size, struct sec_desc_map_info *sdmi) { int error; if (dma_mem->dma_vaddr != NULL) return (EBUSY); switch (crp->crp_buf.cb_type) { case CRYPTO_BUF_CONTIG: break; case CRYPTO_BUF_UIO: size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE; break; case CRYPTO_BUF_MBUF: size = m_length(crp->crp_buf.cb_mbuf, NULL); break; + case CRYPTO_BUF_VMPAGE: + size = PAGE_SIZE - crp->crp_buf.cb_vm_page_offset; + break; default: return (EINVAL); } error = bus_dma_tag_create(NULL, /* parent */ SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ size, /* maxsize */ SEC_FREE_LT_CNT(sc), /* nsegments */ SEC_MAX_DMA_BLOCK_SIZE, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &(dma_mem->dma_tag)); /* dmat */ if (error) { device_printf(sc->sc_dev, "failed to allocate busdma tag, error" " %i!\n", error); dma_mem->dma_vaddr = NULL; return (error); } error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map)); if (error) { device_printf(sc->sc_dev, "failed to create DMA map, error %i!" "\n", error); bus_dma_tag_destroy(dma_mem->dma_tag); return (error); } error = bus_dmamap_load_crp(dma_mem->dma_tag, dma_mem->dma_map, crp, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "cannot get address of the DMA" " memory, error %i!\n", error); bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map); bus_dma_tag_destroy(dma_mem->dma_tag); return (error); } dma_mem->dma_is_map = 1; dma_mem->dma_vaddr = crp; return (0); } static void sec_free_dma_mem(struct sec_dma_mem *dma_mem) { /* Check for double free */ if (dma_mem->dma_vaddr == NULL) return; bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map); if (dma_mem->dma_is_map) bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map); else bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map); bus_dma_tag_destroy(dma_mem->dma_tag); dma_mem->dma_vaddr = NULL; } static int sec_eu_channel(struct sec_softc *sc, int eu) { uint64_t reg; int channel = 0; SEC_LOCK_ASSERT(sc, controller); reg = SEC_READ(sc, SEC_EUASR); switch (eu) { case SEC_EU_AFEU: channel = SEC_EUASR_AFEU(reg); break; case SEC_EU_DEU: channel = SEC_EUASR_DEU(reg); break; case SEC_EU_MDEU_A: case SEC_EU_MDEU_B: channel = SEC_EUASR_MDEU(reg); break; case SEC_EU_RNGU: channel = SEC_EUASR_RNGU(reg); break; case SEC_EU_PKEU: channel = SEC_EUASR_PKEU(reg); break; case SEC_EU_AESU: channel = SEC_EUASR_AESU(reg); break; case SEC_EU_KEU: channel = SEC_EUASR_KEU(reg); break; case SEC_EU_CRCU: channel = SEC_EUASR_CRCU(reg); break; } return (channel - 1); } static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel) { u_int fflvl = SEC_MAX_FIFO_LEVEL; uint64_t reg; int i; SEC_LOCK_ASSERT(sc, controller); /* Find free channel if have not got one */ if (channel < 0) { for (i = 0; i < SEC_CHANNELS; i++) { reg = SEC_READ(sc, SEC_CHAN_CSR(channel)); if ((reg & sc->sc_channel_idle_mask) == 0) { channel = i; break; } } } /* There is no free channel */ if (channel
< 0) return (-1); /* Check FIFO level on selected channel */ reg = SEC_READ(sc, SEC_CHAN_CSR(channel)); switch(sc->sc_version) { case 2: fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M; break; case 3: fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M; break; } if (fflvl >= SEC_MAX_FIFO_LEVEL) return (-1); /* Enqueue descriptor in channel */ SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr); return (channel); } static void sec_enqueue(struct sec_softc *sc) { struct sec_desc *desc; int ch0, ch1; SEC_LOCK(sc, descriptors); SEC_LOCK(sc, controller); while (SEC_READY_DESC_CNT(sc) > 0) { desc = SEC_GET_READY_DESC(sc); ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0); ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1); /* * Both EU are used by the same channel. * Enqueue descriptor in channel used by busy EUs. */ if (ch0 >= 0 && ch0 == ch1) { if (sec_enqueue_desc(sc, desc, ch0) >= 0) { SEC_DESC_READY2QUEUED(sc); continue; } } /* * Only one EU is free. * Enqueue descriptor in channel used by busy EU. */ if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) { if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1) >= 0) { SEC_DESC_READY2QUEUED(sc); continue; } } /* * Both EU are free. * Enqueue descriptor in first free channel. */ if (ch0 < 0 && ch1 < 0) { if (sec_enqueue_desc(sc, desc, -1) >= 0) { SEC_DESC_READY2QUEUED(sc); continue; } } /* Current descriptor can not be queued at the moment */ SEC_PUT_BACK_READY_DESC(sc); break; } SEC_UNLOCK(sc, controller); SEC_UNLOCK(sc, descriptors); } static struct sec_desc * sec_find_desc(struct sec_softc *sc, bus_addr_t paddr) { struct sec_desc *desc = NULL; int i; SEC_LOCK_ASSERT(sc, descriptors); for (i = 0; i < SEC_CHANNELS; i++) { if (sc->sc_desc[i].sd_desc_paddr == paddr) { desc = &(sc->sc_desc[i]); break; } } return (desc); } static int sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize) { struct sec_hw_desc_ptr *ptr; SEC_LOCK_ASSERT(sc, descriptors); ptr = &(desc->sd_desc->shd_pointer[n]); ptr->shdp_length = dsize; ptr->shdp_extent = 0; ptr->shdp_j = 0; ptr->shdp_ptr = data; return (0); } static int sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc, u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize) { struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 }; struct sec_hw_desc_ptr *ptr; int error; SEC_LOCK_ASSERT(sc, descriptors); error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), crp, dsize, &sdmi); if (error) return (error); sdmi.sdmi_lt_last->sl_lt->shl_r = 1; desc->sd_lt_used += sdmi.sdmi_lt_used; ptr = &(desc->sd_desc->shd_pointer[n]); ptr->shdp_length = dsize; ptr->shdp_extent = 0; ptr->shdp_j = 1; ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr; return (0); } static bool sec_cipher_supported(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: /* AESU */ if (csp->csp_ivlen != AES_BLOCK_LEN) return (false); break; default: return (false); } if (csp->csp_cipher_klen == 0 || csp->csp_cipher_klen > SEC_MAX_KEY_LEN) return (false); return (true); } static bool sec_auth_supported(struct sec_softc *sc, const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: if (sc->sc_version < 3) return (false); /* FALLTHROUGH */ case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: if (csp->csp_auth_klen > SEC_MAX_KEY_LEN) return (false); break; case CRYPTO_SHA1: break; default: return (false); } return (true); } static 
int sec_probesession(device_t dev, const struct crypto_session_params *csp) { struct sec_softc *sc = device_get_softc(dev); if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!sec_auth_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!sec_cipher_supported(csp)) return (EINVAL); break; case CSP_MODE_ETA: if (!sec_auth_supported(sc, csp) || !sec_cipher_supported(csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_HARDWARE); } static int sec_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct sec_eu_methods *eu = sec_eus; struct sec_session *ses; ses = crypto_get_driver_session(cses); /* Find EU for this session */ while (eu->sem_make_desc != NULL) { if (eu->sem_newsession(csp)) break; eu++; } KASSERT(eu->sem_make_desc != NULL, ("failed to find eu for session")); /* Save cipher key */ if (csp->csp_cipher_key != NULL) memcpy(ses->ss_key, csp->csp_cipher_key, csp->csp_cipher_klen); /* Save digest key */ if (csp->csp_auth_key != NULL) memcpy(ses->ss_mkey, csp->csp_auth_key, csp->csp_auth_klen); if (csp->csp_auth_alg != 0) { if (csp->csp_auth_mlen == 0) ses->ss_mlen = crypto_auth_hash(csp)->hashsize; else ses->ss_mlen = csp->csp_auth_mlen; } return (0); } static int sec_process(device_t dev, struct cryptop *crp, int hint) { struct sec_softc *sc = device_get_softc(dev); struct sec_desc *desc = NULL; const struct crypto_session_params *csp; struct sec_session *ses; int error = 0; ses = crypto_get_driver_session(crp->crp_session); csp = crypto_get_params(crp->crp_session); /* Check for input length */ if (crypto_buffer_len(&crp->crp_buf) > SEC_MAX_DMA_BLOCK_SIZE) { crp->crp_etype = E2BIG; crypto_done(crp); return (0); } SEC_LOCK(sc, descriptors); SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Block driver if there is no free descriptors or we are going down */ if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) { sc->sc_blocked |= CRYPTO_SYMQ; SEC_UNLOCK(sc, descriptors); return (ERESTART); } /* Prepare descriptor */ desc = SEC_GET_FREE_DESC(sc); desc->sd_lt_used = 0; desc->sd_error = 0; desc->sd_crp = crp; if (csp->csp_cipher_alg != 0) crypto_read_iv(crp, desc->sd_desc->shd_iv); if (crp->crp_cipher_key != NULL) memcpy(ses->ss_key, crp->crp_cipher_key, csp->csp_cipher_klen); if (crp->crp_auth_key != NULL) memcpy(ses->ss_mkey, crp->crp_auth_key, csp->csp_auth_klen); memcpy(desc->sd_desc->shd_key, ses->ss_key, csp->csp_cipher_klen); memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, csp->csp_auth_klen); error = ses->ss_eu->sem_make_desc(sc, csp, desc, crp); if (error) { SEC_DESC_FREE_POINTERS(desc); SEC_DESC_PUT_BACK_LT(sc, desc); SEC_PUT_BACK_FREE_DESC(sc); SEC_UNLOCK(sc, descriptors); crp->crp_etype = error; crypto_done(crp); return (0); } /* * Skip DONE interrupt if this is not last request in burst, but only * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE * signaling on each descriptor. 
*/ if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3) desc->sd_desc->shd_dn = 0; else desc->sd_desc->shd_dn = 1; SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); SEC_DESC_FREE2READY(sc); SEC_UNLOCK(sc, descriptors); /* Enqueue ready descriptors in hardware */ sec_enqueue(sc); return (0); } static int sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc, const struct crypto_session_params *csp, struct cryptop *crp) { struct sec_hw_desc *hd = desc->sd_desc; int error; hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP; hd->shd_eu_sel1 = SEC_EU_NONE; hd->shd_mode1 = 0; /* Pointer 0: NULL */ error = sec_make_pointer_direct(sc, desc, 0, 0, 0); if (error) return (error); /* Pointer 1: IV IN */ error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen); if (error) return (error); /* Pointer 2: Cipher Key */ error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen); if (error) return (error); /* Pointer 3: Data IN */ error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); /* Pointer 4: Data OUT */ error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); /* Pointer 5: IV OUT (Not used: NULL) */ error = sec_make_pointer_direct(sc, desc, 5, 0, 0); if (error) return (error); /* Pointer 6: NULL */ error = sec_make_pointer_direct(sc, desc, 6, 0, 0); return (error); } static int sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc, const struct crypto_session_params *csp, struct cryptop *crp) { struct sec_hw_desc *hd = desc->sd_desc; u_int eu, mode, hashlen; int error; error = sec_mdeu_config(csp, &eu, &mode, &hashlen); if (error) return (error); hd->shd_desc_type = SEC_DT_HMAC_SNOOP; hd->shd_eu_sel1 = eu; hd->shd_mode1 = mode; /* Pointer 0: HMAC Key */ error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_mkey), csp->csp_auth_klen); if (error) return (error); /* Pointer 1: HMAC-Only Data IN */ error = sec_make_pointer(sc, desc, 1, crp, crp->crp_aad_start, crp->crp_aad_length); if (error) return (error); /* Pointer 2: Cipher Key */ error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen); if (error) return (error); /* Pointer 3: IV IN */ error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen); if (error) return (error); /* Pointer 4: Data IN */ error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); /* Pointer 5: Data OUT */ error = sec_make_pointer(sc, desc, 5, crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); /* Pointer 6: HMAC OUT */ error = sec_make_pointer_direct(sc, desc, 6, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_digest), hashlen); return (error); } /* AESU */ static bool sec_aesu_newsession(const struct crypto_session_params *csp) { return (csp->csp_cipher_alg == CRYPTO_AES_CBC); } static int sec_aesu_make_desc(struct sec_softc *sc, const struct crypto_session_params *csp, struct sec_desc *desc, struct cryptop *crp) { struct sec_hw_desc *hd = desc->sd_desc; int error; hd->shd_eu_sel0 = SEC_EU_AESU; hd->shd_mode0 = SEC_AESU_MODE_CBC; if 
(CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { hd->shd_mode0 |= SEC_AESU_MODE_ED; hd->shd_dir = 0; } else hd->shd_dir = 1; if (csp->csp_mode == CSP_MODE_ETA) error = sec_build_common_s_desc(sc, desc, csp, crp); else error = sec_build_common_ns_desc(sc, desc, csp, crp); return (error); } /* MDEU */ static bool sec_mdeu_can_handle(u_int alg) { switch (alg) { case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: return (true); default: return (false); } } static int sec_mdeu_config(const struct crypto_session_params *csp, u_int *eu, u_int *mode, u_int *hashlen) { *mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT; *eu = SEC_EU_NONE; switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: *mode |= SEC_MDEU_MODE_HMAC; /* FALLTHROUGH */ case CRYPTO_SHA1: *eu = SEC_EU_MDEU_A; *mode |= SEC_MDEU_MODE_SHA1; *hashlen = SHA1_HASH_LEN; break; case CRYPTO_SHA2_256_HMAC: *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256; *eu = SEC_EU_MDEU_A; break; case CRYPTO_SHA2_384_HMAC: *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384; *eu = SEC_EU_MDEU_B; break; case CRYPTO_SHA2_512_HMAC: *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512; *eu = SEC_EU_MDEU_B; break; default: return (EINVAL); } if (*mode & SEC_MDEU_MODE_HMAC) *hashlen = SEC_HMAC_HASH_LEN; return (0); } static bool sec_mdeu_newsession(const struct crypto_session_params *csp) { return (sec_mdeu_can_handle(csp->csp_auth_alg)); } static int sec_mdeu_make_desc(struct sec_softc *sc, const struct crypto_session_params *csp, struct sec_desc *desc, struct cryptop *crp) { struct sec_hw_desc *hd = desc->sd_desc; u_int eu, mode, hashlen; int error; error = sec_mdeu_config(csp, &eu, &mode, &hashlen); if (error) return (error); hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP; hd->shd_eu_sel0 = eu; hd->shd_mode0 = mode; hd->shd_eu_sel1 = SEC_EU_NONE; hd->shd_mode1 = 0; /* Pointer 0: NULL */ error = sec_make_pointer_direct(sc, desc, 0, 0, 0); if (error) return (error); /* Pointer 1: Context In (Not used: NULL) */ error = sec_make_pointer_direct(sc, desc, 1, 0, 0); if (error) return (error); /* Pointer 2: HMAC Key (or NULL, depending on digest type) */ if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC) error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_mkey), csp->csp_auth_klen); else error = sec_make_pointer_direct(sc, desc, 2, 0, 0); if (error) return (error); /* Pointer 3: Input Data */ error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); /* Pointer 4: NULL */ error = sec_make_pointer_direct(sc, desc, 4, 0, 0); if (error) return (error); /* Pointer 5: Hash out */ error = sec_make_pointer_direct(sc, desc, 5, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_digest), hashlen); if (error) return (error); /* Pointer 6: NULL */ error = sec_make_pointer_direct(sc, desc, 6, 0, 0); return (0); } Index: head/sys/kern/subr_bus_dma.c =================================================================== --- head/sys/kern/subr_bus_dma.c (revision 364798) +++ head/sys/kern/subr_bus_dma.c (revision 364799) @@ -1,697 +1,702 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012 EMC Corp. * All rights reserved. * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_bus.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Load up data starting at offset within a region specified by a * list of virtual address ranges until either length or the region * are exhausted. */ static int _bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs, int flags, size_t offset, size_t length) { int error; error = 0; for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) { char *addr; size_t ds_len; KASSERT((offset < list->ds_len), ("Invalid mid-segment offset")); addr = (char *)(uintptr_t)list->ds_addr + offset; ds_len = list->ds_len - offset; offset = 0; if (ds_len > length) ds_len = length; length -= ds_len; KASSERT((ds_len != 0), ("Segment length is zero")); error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap, flags, NULL, nsegs); if (error) break; } return (error); } /* * Load a list of physical addresses. */ static int _bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags) { int error; error = 0; for (; sglist_cnt > 0; sglist_cnt--, list++) { error = _bus_dmamap_load_phys(dmat, map, (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL, nsegs); if (error) break; } return (error); } /* * Load an unmapped mbuf */ static int _bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags) { int error, i, off, len, pglen, pgoff, seglen, segoff; M_ASSERTEXTPG(m); len = m->m_len; error = 0; /* Skip over any data removed from the front. 
*/ off = mtod(m, vm_offset_t); if (m->m_epg_hdrlen != 0) { if (off >= m->m_epg_hdrlen) { off -= m->m_epg_hdrlen; } else { seglen = m->m_epg_hdrlen - off; segoff = off; seglen = min(seglen, len); off = 0; len -= seglen; error = _bus_dmamap_load_buffer(dmat, map, &m->m_epg_hdr[segoff], seglen, kernel_pmap, flags, segs, nsegs); } } pgoff = m->m_epg_1st_off; for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) { pglen = m_epg_pagelen(m, i, pgoff); if (off >= pglen) { off -= pglen; pgoff = 0; continue; } seglen = pglen - off; segoff = pgoff + off; off = 0; seglen = min(seglen, len); len -= seglen; error = _bus_dmamap_load_phys(dmat, map, m->m_epg_pa[i] + segoff, seglen, flags, segs, nsegs); pgoff = 0; }; if (len != 0 && error == 0) { KASSERT((off + len) <= m->m_epg_trllen, ("off + len > trail (%d + %d > %d)", off, len, m->m_epg_trllen)); error = _bus_dmamap_load_buffer(dmat, map, &m->m_epg_trail[off], len, kernel_pmap, flags, segs, nsegs); } return (error); } /* * Load an mbuf chain. */ static int _bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags) { struct mbuf *m; int error; error = 0; for (m = m0; m != NULL && error == 0; m = m->m_next) { if (m->m_len > 0) { if ((m->m_flags & M_EXTPG) != 0) error = _bus_dmamap_load_mbuf_epg(dmat, map, m, segs, nsegs, flags); else error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len, kernel_pmap, flags | BUS_DMA_LOAD_MBUF, segs, nsegs); } } CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, *nsegs); return (error); } /* * Load from block io. */ static int _bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio, int *nsegs, int flags) { if ((bio->bio_flags & BIO_VLIST) != 0) { bus_dma_segment_t *segs = (bus_dma_segment_t *)bio->bio_data; return (_bus_dmamap_load_vlist(dmat, map, segs, bio->bio_ma_n, kernel_pmap, nsegs, flags, bio->bio_ma_offset, bio->bio_bcount)); } if ((bio->bio_flags & BIO_UNMAPPED) != 0) return (_bus_dmamap_load_ma(dmat, map, bio->bio_ma, bio->bio_bcount, bio->bio_ma_offset, flags, NULL, nsegs)); return (_bus_dmamap_load_buffer(dmat, map, bio->bio_data, bio->bio_bcount, kernel_pmap, flags, NULL, nsegs)); } int bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp) { vm_paddr_t paddr; bus_size_t len; int error, i; error = 0; for (i = 0; tlen > 0; i++, tlen -= len) { len = min(PAGE_SIZE - ma_offs, tlen); paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs; error = _bus_dmamap_load_phys(dmat, map, paddr, len, flags, segs, segp); if (error != 0) break; ma_offs = 0; } return (error); } /* * Load a cam control block. 
*/ static int _bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb, int *nsegs, int flags) { struct ccb_hdr *ccb_h; void *data_ptr; int error; uint32_t dxfer_len; uint16_t sglist_cnt; error = 0; ccb_h = &ccb->ccb_h; switch (ccb_h->func_code) { case XPT_SCSI_IO: { struct ccb_scsiio *csio; csio = &ccb->csio; data_ptr = csio->data_ptr; dxfer_len = csio->dxfer_len; sglist_cnt = csio->sglist_cnt; break; } case XPT_CONT_TARGET_IO: { struct ccb_scsiio *ctio; ctio = &ccb->ctio; data_ptr = ctio->data_ptr; dxfer_len = ctio->dxfer_len; sglist_cnt = ctio->sglist_cnt; break; } case XPT_ATA_IO: { struct ccb_ataio *ataio; ataio = &ccb->ataio; data_ptr = ataio->data_ptr; dxfer_len = ataio->dxfer_len; sglist_cnt = 0; break; } case XPT_NVME_IO: case XPT_NVME_ADMIN: { struct ccb_nvmeio *nvmeio; nvmeio = &ccb->nvmeio; data_ptr = nvmeio->data_ptr; dxfer_len = nvmeio->dxfer_len; sglist_cnt = nvmeio->sglist_cnt; break; } default: panic("_bus_dmamap_load_ccb: Unsupported func code %d", ccb_h->func_code); } switch ((ccb_h->flags & CAM_DATA_MASK)) { case CAM_DATA_VADDR: error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len, kernel_pmap, flags, NULL, nsegs); break; case CAM_DATA_PADDR: error = _bus_dmamap_load_phys(dmat, map, (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL, nsegs); break; case CAM_DATA_SG: error = _bus_dmamap_load_vlist(dmat, map, (bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap, nsegs, flags, 0, dxfer_len); break; case CAM_DATA_SG_PADDR: error = _bus_dmamap_load_plist(dmat, map, (bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags); break; case CAM_DATA_BIO: error = _bus_dmamap_load_bio(dmat, map, (struct bio *)data_ptr, nsegs, flags); break; default: panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented", ccb_h->flags); } return (error); } /* * Load a uio. */ static int _bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, int *nsegs, int flags) { bus_size_t resid; bus_size_t minlen; struct iovec *iov; pmap_t pmap; caddr_t addr; int error, i; if (uio->uio_segflg == UIO_USERSPACE) { KASSERT(uio->uio_td != NULL, ("bus_dmamap_load_uio: USERSPACE but no proc")); pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); } else pmap = kernel_pmap; resid = uio->uio_resid; iov = uio->uio_iov; error = 0; for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { /* * Now at the first iovec to load. Load each iovec * until we have exhausted the residual count. */ addr = (caddr_t) iov[i].iov_base; minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; if (minlen > 0) { error = _bus_dmamap_load_buffer(dmat, map, addr, minlen, pmap, flags, NULL, nsegs); resid -= minlen; } } return (error); } /* * Map the buffer buf into bus space using the dmamap map. 
*/ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; struct memdesc mem; int error; int nsegs; if ((flags & BUS_DMA_NOWAIT) == 0) { mem = memdesc_vaddr(buf, buflen); _bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg); } nsegs = -1; error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap, flags, NULL, &nsegs); nsegs++; CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); if (error == EINPROGRESS) return (error); segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, error); else (*callback)(callback_arg, segs, nsegs, 0); /* * Return ENOMEM to the caller so that it can pass it up the stack. * This error only happens when NOWAIT is set, so deferral is disabled. */ if (error == ENOMEM) return (error); return (0); } int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, bus_dmamap_callback2_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; int nsegs, error; M_ASSERTPKTHDR(m0); flags |= BUS_DMA_NOWAIT; nsegs = -1; error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags); ++nsegs; segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, 0, error); else (*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len, error); CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); return (error); } int bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags) { int error; flags |= BUS_DMA_NOWAIT; *nsegs = -1; error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags); ++*nsegs; _bus_dmamap_complete(dmat, map, segs, *nsegs, error); return (error); } int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, bus_dmamap_callback2_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; int nsegs, error; flags |= BUS_DMA_NOWAIT; nsegs = -1; error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags); nsegs++; segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, 0, error); else (*callback)(callback_arg, segs, nsegs, uio->uio_resid, error); CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); return (error); } int bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; struct ccb_hdr *ccb_h; struct memdesc mem; int error; int nsegs; ccb_h = &ccb->ccb_h; if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) { callback(callback_arg, NULL, 0, 0); return (0); } if ((flags & BUS_DMA_NOWAIT) == 0) { mem = memdesc_ccb(ccb); _bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg); } nsegs = -1; error = _bus_dmamap_load_ccb(dmat, map, ccb, &nsegs, flags); nsegs++; CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); if (error == EINPROGRESS) return (error); segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, error); else (*callback)(callback_arg, segs, nsegs, error); /* * Return ENOMEM to the caller so that it can pass it up the stack. * This error only happens when NOWAIT is set, so deferral is disabled. 
*/ if (error == ENOMEM) return (error); return (0); } int bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; struct memdesc mem; int error; int nsegs; if ((flags & BUS_DMA_NOWAIT) == 0) { mem = memdesc_bio(bio); _bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg); } nsegs = -1; error = _bus_dmamap_load_bio(dmat, map, bio, &nsegs, flags); nsegs++; CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); if (error == EINPROGRESS) return (error); segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, error); else (*callback)(callback_arg, segs, nsegs, error); /* * Return ENOMEM to the caller so that it can pass it up the stack. * This error only happens when NOWAIT is set, so deferral is disabled. */ if (error == ENOMEM) return (error); return (0); } int bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; int error; int nsegs; if ((flags & BUS_DMA_NOWAIT) == 0) _bus_dmamap_waitok(dmat, map, mem, callback, callback_arg); nsegs = -1; error = 0; switch (mem->md_type) { case MEMDESC_VADDR: error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr, mem->md_opaque, kernel_pmap, flags, NULL, &nsegs); break; case MEMDESC_PADDR: error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr, mem->md_opaque, flags, NULL, &nsegs); break; case MEMDESC_VLIST: error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list, mem->md_opaque, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX); break; case MEMDESC_PLIST: error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list, mem->md_opaque, &nsegs, flags); break; case MEMDESC_BIO: error = _bus_dmamap_load_bio(dmat, map, mem->u.md_bio, &nsegs, flags); break; case MEMDESC_UIO: error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio, &nsegs, flags); break; case MEMDESC_MBUF: error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf, NULL, &nsegs, flags); break; case MEMDESC_CCB: error = _bus_dmamap_load_ccb(dmat, map, mem->u.md_ccb, &nsegs, flags); break; } nsegs++; CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); if (error == EINPROGRESS) return (error); segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, error); else (*callback)(callback_arg, segs, nsegs, 0); /* * Return ENOMEM to the caller so that it can pass it up the stack. * This error only happens when NOWAIT is set, so deferral is disabled. 
*/ if (error == ENOMEM) return (error); return (0); } int bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, struct crypto_buffer *cb, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; int error; int nsegs; flags |= BUS_DMA_NOWAIT; nsegs = -1; error = 0; switch (cb->cb_type) { case CRYPTO_BUF_CONTIG: error = _bus_dmamap_load_buffer(dmat, map, cb->cb_buf, cb->cb_buf_len, kernel_pmap, flags, NULL, &nsegs); break; case CRYPTO_BUF_MBUF: error = _bus_dmamap_load_mbuf_sg(dmat, map, cb->cb_mbuf, NULL, &nsegs, flags); break; case CRYPTO_BUF_UIO: error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs, flags); break; + case CRYPTO_BUF_VMPAGE: + error = _bus_dmamap_load_ma(dmat, map, cb->cb_vm_page, + cb->cb_vm_page_len, cb->cb_vm_page_offset, flags, NULL, + &nsegs); + break; default: error = EINVAL; } nsegs++; CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); if (error == EINPROGRESS) return (error); segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, error); else (*callback)(callback_arg, segs, nsegs, 0); /* * Return ENOMEM to the caller so that it can pass it up the stack. * This error only happens when NOWAIT is set, so deferral is disabled. */ if (error == ENOMEM) return (error); return (0); } int bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { return (bus_dmamap_load_crp_buffer(dmat, map, &crp->crp_buf, callback, callback_arg, flags)); } Index: head/sys/opencrypto/criov.c =================================================================== --- head/sys/opencrypto/criov.c (revision 364798) +++ head/sys/opencrypto/criov.c (revision 364799) @@ -1,558 +1,769 @@ /* $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $ */ /*- * Copyright (c) 1999 Theo de Raadt * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include +#include +#include + +#include +#include +#include + #include +SDT_PROVIDER_DECLARE(opencrypto); + /* - * This macro is only for avoiding code duplication, as we need to skip - * given number of bytes in the same way in three functions below. + * These macros are only for avoiding code duplication, as we need to skip + * given number of bytes in the same way in several functions below. */ #define CUIO_SKIP() do { \ KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \ KASSERT(len >= 0, ("%s: len %d < 0", __func__, len)); \ while (off > 0) { \ KASSERT(iol >= 0, ("%s: empty in skip", __func__)); \ if (off < iov->iov_len) \ break; \ off -= iov->iov_len; \ iol--; \ iov++; \ } \ } while (0) +#define CVM_PAGE_SKIP() do { \ + KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \ + KASSERT(len >= 0, ("%s: len %d < 0", __func__, len)); \ + while (off > 0) { \ + if (off < PAGE_SIZE) \ + break; \ + processed += PAGE_SIZE - off; \ + off -= PAGE_SIZE - off; \ + pages++; \ + } \ +} while (0) + static void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp) { struct iovec *iov = uio->uio_iov; int iol = uio->uio_iovcnt; unsigned count; CUIO_SKIP(); while (len > 0) { KASSERT(iol >= 0, ("%s: empty", __func__)); count = min(iov->iov_len - off, len); bcopy(((caddr_t)iov->iov_base) + off, cp, count); len -= count; cp += count; off = 0; iol--; iov++; } } static void cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp) { struct iovec *iov = uio->uio_iov; int iol = uio->uio_iovcnt; unsigned count; CUIO_SKIP(); while (len > 0) { KASSERT(iol >= 0, ("%s: empty", __func__)); count = min(iov->iov_len - off, len); bcopy(cp, ((caddr_t)iov->iov_base) + off, count); len -= count; cp += count; off = 0; iol--; iov++; } } /* * Return the index and offset of location in iovec list. */ static int cuio_getptr(struct uio *uio, int loc, int *off) { int ind, len; ind = 0; while (loc >= 0 && ind < uio->uio_iovcnt) { len = uio->uio_iov[ind].iov_len; if (len > loc) { *off = loc; return (ind); } loc -= len; ind++; } if (ind > 0 && loc == 0) { ind--; *off = uio->uio_iov[ind].iov_len; return (ind); } return (-1); } +#if CRYPTO_MAY_HAVE_VMPAGE +/* + * Apply function f to the data in a vm_page_t list starting "off" bytes from + * the beginning, continuing for "len" bytes. + */ +static int +cvm_page_apply(vm_page_t *pages, int off, int len, + int (*f)(void *, const void *, u_int), void *arg) +{ + int processed = 0; + unsigned count; + int rval; + + CVM_PAGE_SKIP(); + while (len > 0) { + char *kaddr = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)); + count = min(PAGE_SIZE - off, len); + rval = (*f)(arg, kaddr + off, count); + if (rval) + return (rval); + len -= count; + processed += count; + off = 0; + pages++; + } + return (0); +} + +static inline void * +cvm_page_contiguous_segment(vm_page_t *pages, size_t skip, int len) +{ + if ((skip + len - 1) / PAGE_SIZE > skip / PAGE_SIZE) + return (NULL); + + pages += (skip / PAGE_SIZE); + skip -= rounddown(skip, PAGE_SIZE); + return (((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages))) + skip); +} + +/* + * Copy len bytes of data from the vm_page_t array, skipping the first off + * bytes, into the pointer cp. Return the number of bytes skipped and copied. + * Does not verify the length of the array. 
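/*
 * Illustrative sketch (editorial addition, not part of this change): the shape
 * of callback that cvm_page_apply() and the other *_apply() helpers invoke.
 * Each call receives one virtually-contiguous segment of the buffer; returning
 * non-zero stops the walk early.  Here the segments feed a SHA-256 context.
 */
#include <crypto/sha2/sha256.h>
#include <opencrypto/cryptodev.h>

static int
sha256_segment_cb(void *arg, const void *data, u_int len)
{

        SHA256_Update(arg, data, len);
        return (0);
}

/* Hash "len" bytes of a request's input buffer starting at offset "off". */
static int
hash_crp_region(struct cryptop *crp, int off, int len, SHA256_CTX *ctx)
{

        SHA256_Init(ctx);
        return (crypto_apply(crp, off, len, sha256_segment_cb, ctx));
}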
+ */ +static int +cvm_page_copyback(vm_page_t *pages, int off, int len, c_caddr_t cp) +{ + int processed = 0; + unsigned count; + + CVM_PAGE_SKIP(); + while (len > 0) { + count = min(PAGE_SIZE - off, len); + bcopy(cp, (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off, + count); + len -= count; + cp += count; + processed += count; + off = 0; + pages++; + } + return (processed); +} + +/* + * Copy len bytes of data from the pointer cp into the vm_page_t array, + * skipping the first off bytes, Return the number of bytes skipped and copied. + * Does not verify the length of the array. + */ +static int +cvm_page_copydata(vm_page_t *pages, int off, int len, caddr_t cp) +{ + int processed = 0; + unsigned count; + + CVM_PAGE_SKIP(); + while (len > 0) { + count = min(PAGE_SIZE - off, len); + bcopy(((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*pages)) + off), cp, + count); + len -= count; + cp += count; + processed += count; + off = 0; + pages++; + } + return processed; +} +#endif /* CRYPTO_MAY_HAVE_VMPAGE */ + void crypto_cursor_init(struct crypto_buffer_cursor *cc, const struct crypto_buffer *cb) { memset(cc, 0, sizeof(*cc)); cc->cc_type = cb->cb_type; switch (cc->cc_type) { case CRYPTO_BUF_CONTIG: cc->cc_buf = cb->cb_buf; cc->cc_buf_len = cb->cb_buf_len; break; case CRYPTO_BUF_MBUF: cc->cc_mbuf = cb->cb_mbuf; break; + case CRYPTO_BUF_VMPAGE: + cc->cc_vmpage = cb->cb_vm_page; + cc->cc_buf_len = cb->cb_vm_page_len; + cc->cc_offset = cb->cb_vm_page_offset; + break; case CRYPTO_BUF_UIO: cc->cc_iov = cb->cb_uio->uio_iov; break; default: #ifdef INVARIANTS panic("%s: invalid buffer type %d", __func__, cb->cb_type); #endif break; } } +SDT_PROBE_DEFINE2(opencrypto, criov, cursor_advance, vmpage, "struct crypto_buffer_cursor*", "size_t"); + void crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount) { size_t remain; switch (cc->cc_type) { case CRYPTO_BUF_CONTIG: MPASS(cc->cc_buf_len >= amount); cc->cc_buf += amount; cc->cc_buf_len -= amount; break; case CRYPTO_BUF_MBUF: for (;;) { remain = cc->cc_mbuf->m_len - cc->cc_offset; if (amount < remain) { cc->cc_offset += amount; break; } amount -= remain; cc->cc_mbuf = cc->cc_mbuf->m_next; cc->cc_offset = 0; if (amount == 0) break; } break; + case CRYPTO_BUF_VMPAGE: + for (;;) { + SDT_PROBE2(opencrypto, criov, cursor_advance, vmpage, + cc, amount); + remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len); + if (amount < remain) { + cc->cc_buf_len -= amount; + cc->cc_offset += amount; + break; + } + cc->cc_buf_len -= remain; + amount -= remain; + cc->cc_vmpage++; + cc->cc_offset = 0; + if (amount == 0 || cc->cc_buf_len == 0) + break; + } + break; case CRYPTO_BUF_UIO: for (;;) { remain = cc->cc_iov->iov_len - cc->cc_offset; if (amount < remain) { cc->cc_offset += amount; break; } amount -= remain; cc->cc_iov++; cc->cc_offset = 0; if (amount == 0) break; } break; default: #ifdef INVARIANTS panic("%s: invalid buffer type %d", __func__, cc->cc_type); #endif break; } } void * crypto_cursor_segbase(struct crypto_buffer_cursor *cc) { switch (cc->cc_type) { case CRYPTO_BUF_CONTIG: return (cc->cc_buf); case CRYPTO_BUF_MBUF: if (cc->cc_mbuf == NULL) return (NULL); KASSERT((cc->cc_mbuf->m_flags & M_EXTPG) == 0, ("%s: not supported for unmapped mbufs", __func__)); return (mtod(cc->cc_mbuf, char *) + cc->cc_offset); + case CRYPTO_BUF_VMPAGE: + return ((char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS( + *cc->cc_vmpage)) + cc->cc_offset); case CRYPTO_BUF_UIO: return ((char *)cc->cc_iov->iov_base + cc->cc_offset); default: #ifdef INVARIANTS panic("%s: invalid buffer type %d", 
__func__, cc->cc_type); #endif return (NULL); } } size_t crypto_cursor_seglen(struct crypto_buffer_cursor *cc) { switch (cc->cc_type) { case CRYPTO_BUF_CONTIG: return (cc->cc_buf_len); + case CRYPTO_BUF_VMPAGE: + return (PAGE_SIZE - cc->cc_offset); case CRYPTO_BUF_MBUF: if (cc->cc_mbuf == NULL) return (0); return (cc->cc_mbuf->m_len - cc->cc_offset); case CRYPTO_BUF_UIO: return (cc->cc_iov->iov_len - cc->cc_offset); default: #ifdef INVARIANTS panic("%s: invalid buffer type %d", __func__, cc->cc_type); #endif return (0); } } void crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size, const void *vsrc) { size_t remain, todo; const char *src; char *dst; src = vsrc; switch (cc->cc_type) { case CRYPTO_BUF_CONTIG: MPASS(cc->cc_buf_len >= size); memcpy(cc->cc_buf, src, size); cc->cc_buf += size; cc->cc_buf_len -= size; break; case CRYPTO_BUF_MBUF: for (;;) { KASSERT((cc->cc_mbuf->m_flags & M_EXTPG) == 0, ("%s: not supported for unmapped mbufs", __func__)); dst = mtod(cc->cc_mbuf, char *) + cc->cc_offset; remain = cc->cc_mbuf->m_len - cc->cc_offset; todo = MIN(remain, size); memcpy(dst, src, todo); src += todo; if (todo < remain) { cc->cc_offset += todo; break; } size -= todo; cc->cc_mbuf = cc->cc_mbuf->m_next; cc->cc_offset = 0; if (size == 0) break; } break; + case CRYPTO_BUF_VMPAGE: + for (;;) { + dst = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS( + *cc->cc_vmpage)) + cc->cc_offset; + remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len); + todo = MIN(remain, size); + memcpy(dst, src, todo); + src += todo; + cc->cc_buf_len -= todo; + if (todo < remain) { + cc->cc_offset += todo; + break; + } + size -= todo; + cc->cc_vmpage++; + cc->cc_offset = 0; + if (size == 0) + break; + } + break; case CRYPTO_BUF_UIO: for (;;) { dst = (char *)cc->cc_iov->iov_base + cc->cc_offset; remain = cc->cc_iov->iov_len - cc->cc_offset; todo = MIN(remain, size); memcpy(dst, src, todo); src += todo; if (todo < remain) { cc->cc_offset += todo; break; } size -= todo; cc->cc_iov++; cc->cc_offset = 0; if (size == 0) break; } break; default: #ifdef INVARIANTS panic("%s: invalid buffer type %d", __func__, cc->cc_type); #endif break; } } void crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, void *vdst) { size_t remain, todo; const char *src; char *dst; dst = vdst; switch (cc->cc_type) { case CRYPTO_BUF_CONTIG: MPASS(cc->cc_buf_len >= size); memcpy(dst, cc->cc_buf, size); cc->cc_buf += size; cc->cc_buf_len -= size; break; case CRYPTO_BUF_MBUF: for (;;) { KASSERT((cc->cc_mbuf->m_flags & M_EXTPG) == 0, ("%s: not supported for unmapped mbufs", __func__)); src = mtod(cc->cc_mbuf, const char *) + cc->cc_offset; remain = cc->cc_mbuf->m_len - cc->cc_offset; todo = MIN(remain, size); memcpy(dst, src, todo); dst += todo; if (todo < remain) { cc->cc_offset += todo; break; } size -= todo; cc->cc_mbuf = cc->cc_mbuf->m_next; cc->cc_offset = 0; if (size == 0) break; } break; + case CRYPTO_BUF_VMPAGE: + for (;;) { + src = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS( + *cc->cc_vmpage)) + cc->cc_offset; + remain = MIN(PAGE_SIZE - cc->cc_offset, cc->cc_buf_len); + todo = MIN(remain, size); + memcpy(dst, src, todo); + src += todo; + cc->cc_buf_len -= todo; + if (todo < remain) { + cc->cc_offset += todo; + break; + } + size -= todo; + cc->cc_vmpage++; + cc->cc_offset = 0; + if (size == 0) + break; + } + break; case CRYPTO_BUF_UIO: for (;;) { src = (const char *)cc->cc_iov->iov_base + cc->cc_offset; remain = cc->cc_iov->iov_len - cc->cc_offset; todo = MIN(remain, size); memcpy(dst, src, todo); dst += todo; if (todo < remain) { 
cc->cc_offset += todo; break; } size -= todo; cc->cc_iov++; cc->cc_offset = 0; if (size == 0) break; } break; default: #ifdef INVARIANTS panic("%s: invalid buffer type %d", __func__, cc->cc_type); #endif break; } } /* * To avoid advancing 'cursor', make a local copy that gets advanced * instead. */ void crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size, void *vdst) { struct crypto_buffer_cursor copy; copy = *cc; crypto_cursor_copydata(©, size, vdst); } /* * Apply function f to the data in an iovec list starting "off" bytes from * the beginning, continuing for "len" bytes. */ static int cuio_apply(struct uio *uio, int off, int len, int (*f)(void *, const void *, u_int), void *arg) { struct iovec *iov = uio->uio_iov; int iol = uio->uio_iovcnt; unsigned count; int rval; CUIO_SKIP(); while (len > 0) { KASSERT(iol >= 0, ("%s: empty", __func__)); count = min(iov->iov_len - off, len); rval = (*f)(arg, ((caddr_t)iov->iov_base) + off, count); if (rval) return (rval); len -= count; off = 0; iol--; iov++; } return (0); } void crypto_copyback(struct cryptop *crp, int off, int size, const void *src) { struct crypto_buffer *cb; if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) cb = &crp->crp_obuf; else cb = &crp->crp_buf; switch (cb->cb_type) { case CRYPTO_BUF_MBUF: m_copyback(cb->cb_mbuf, off, size, src); break; +#if CRYPTO_MAY_HAVE_VMPAGE + case CRYPTO_BUF_VMPAGE: + MPASS(size <= cb->cb_vm_page_len); + MPASS(size + off <= + cb->cb_vm_page_len + cb->cb_vm_page_offset); + cvm_page_copyback(cb->cb_vm_page, + off + cb->cb_vm_page_offset, size, src); + break; +#endif /* CRYPTO_MAY_HAVE_VMPAGE */ case CRYPTO_BUF_UIO: cuio_copyback(cb->cb_uio, off, size, src); break; case CRYPTO_BUF_CONTIG: MPASS(off + size <= cb->cb_buf_len); bcopy(src, cb->cb_buf + off, size); break; default: #ifdef INVARIANTS panic("invalid crp buf type %d", cb->cb_type); #endif break; } } void crypto_copydata(struct cryptop *crp, int off, int size, void *dst) { switch (crp->crp_buf.cb_type) { case CRYPTO_BUF_MBUF: m_copydata(crp->crp_buf.cb_mbuf, off, size, dst); break; +#if CRYPTO_MAY_HAVE_VMPAGE + case CRYPTO_BUF_VMPAGE: + MPASS(size <= crp->crp_buf.cb_vm_page_len); + MPASS(size + off <= crp->crp_buf.cb_vm_page_len + + crp->crp_buf.cb_vm_page_offset); + cvm_page_copydata(crp->crp_buf.cb_vm_page, + off + crp->crp_buf.cb_vm_page_offset, size, dst); + break; +#endif /* CRYPTO_MAY_HAVE_VMPAGE */ case CRYPTO_BUF_UIO: cuio_copydata(crp->crp_buf.cb_uio, off, size, dst); break; case CRYPTO_BUF_CONTIG: MPASS(off + size <= crp->crp_buf.cb_buf_len); bcopy(crp->crp_buf.cb_buf + off, dst, size); break; default: #ifdef INVARIANTS panic("invalid crp buf type %d", crp->crp_buf.cb_type); #endif break; } } int crypto_apply_buf(struct crypto_buffer *cb, int off, int len, int (*f)(void *, const void *, u_int), void *arg) { int error; switch (cb->cb_type) { case CRYPTO_BUF_MBUF: error = m_apply(cb->cb_mbuf, off, len, (int (*)(void *, void *, u_int))f, arg); break; case CRYPTO_BUF_UIO: error = cuio_apply(cb->cb_uio, off, len, f, arg); break; +#if CRYPTO_MAY_HAVE_VMPAGE + case CRYPTO_BUF_VMPAGE: + error = cvm_page_apply(cb->cb_vm_page, + off + cb->cb_vm_page_offset, len, f, arg); + break; +#endif /* CRYPTO_MAY_HAVE_VMPAGE */ case CRYPTO_BUF_CONTIG: MPASS(off + len <= cb->cb_buf_len); error = (*f)(arg, cb->cb_buf + off, len); break; default: #ifdef INVARIANTS panic("invalid crypto buf type %d", cb->cb_type); #endif error = 0; break; } return (error); } int crypto_apply(struct cryptop *crp, int off, int len, int (*f)(void *, const void *, 
u_int), void *arg) { return (crypto_apply_buf(&crp->crp_buf, off, len, f, arg)); } static inline void * m_contiguous_subsegment(struct mbuf *m, size_t skip, size_t len) { int rel_off; MPASS(skip <= INT_MAX); m = m_getptr(m, (int)skip, &rel_off); if (m == NULL) return (NULL); MPASS(rel_off >= 0); skip = rel_off; if (skip + len > m->m_len) return (NULL); return (mtod(m, char*) + skip); } static inline void * cuio_contiguous_segment(struct uio *uio, size_t skip, size_t len) { int rel_off, idx; MPASS(skip <= INT_MAX); idx = cuio_getptr(uio, (int)skip, &rel_off); if (idx < 0) return (NULL); MPASS(rel_off >= 0); skip = rel_off; if (skip + len > uio->uio_iov[idx].iov_len) return (NULL); return ((char *)uio->uio_iov[idx].iov_base + skip); } void * crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, size_t skip, size_t len) { switch (cb->cb_type) { case CRYPTO_BUF_MBUF: return (m_contiguous_subsegment(cb->cb_mbuf, skip, len)); case CRYPTO_BUF_UIO: return (cuio_contiguous_segment(cb->cb_uio, skip, len)); +#if CRYPTO_MAY_HAVE_VMPAGE + case CRYPTO_BUF_VMPAGE: + MPASS(skip + len <= cb->cb_vm_page_len); + return (cvm_page_contiguous_segment(cb->cb_vm_page, + skip + cb->cb_vm_page_offset, len)); +#endif /* CRYPTO_MAY_HAVE_VMPAGE */ case CRYPTO_BUF_CONTIG: MPASS(skip + len <= cb->cb_buf_len); return (cb->cb_buf + skip); default: #ifdef INVARIANTS panic("invalid crp buf type %d", cb->cb_type); #endif return (NULL); } } void * crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len) { return (crypto_buffer_contiguous_subsegment(&crp->crp_buf, skip, len)); } Index: head/sys/opencrypto/crypto.c =================================================================== --- head/sys/opencrypto/crypto.c (revision 364798) +++ head/sys/opencrypto/crypto.c (revision 364799) @@ -1,2261 +1,2281 @@ /*- * Copyright (c) 2002-2006 Sam Leffler. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Cryptographic Subsystem. * * This code is derived from the Openbsd Cryptographic Framework (OCF) * that has the copyright shown below. Very little of the original * code remains. */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. 
(NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include "opt_compat.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include + #include #include #include #include #include #include #include "cryptodev_if.h" #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) #include #endif SDT_PROVIDER_DEFINE(opencrypto); /* * Crypto drivers register themselves by allocating a slot in the * crypto_drivers table with crypto_get_driverid() and then registering * each asym algorithm they support with crypto_kregister(). */ static struct mtx crypto_drivers_mtx; /* lock on driver table */ #define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_ASSERT() mtx_assert(&crypto_drivers_mtx, MA_OWNED) /* * Crypto device/driver capabilities structure. * * Synchronization: * (d) - protected by CRYPTO_DRIVER_LOCK() * (q) - protected by CRYPTO_Q_LOCK() * Not tagged fields are read-only. */ struct cryptocap { device_t cc_dev; uint32_t cc_hid; u_int32_t cc_sessions; /* (d) # of sessions */ u_int32_t cc_koperations; /* (d) # os asym operations */ u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1]; int cc_flags; /* (d) flags */ #define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */ int cc_qblocked; /* (q) symmetric q blocked */ int cc_kqblocked; /* (q) asymmetric q blocked */ size_t cc_session_size; volatile int cc_refs; }; static struct cryptocap **crypto_drivers = NULL; static int crypto_drivers_size = 0; struct crypto_session { struct cryptocap *cap; void *softc; struct crypto_session_params csp; }; /* * There are two queues for crypto requests; one for symmetric (e.g. * cipher) operations and one for asymmetric (e.g. MOD)operations. * A single mutex is used to lock access to both queues. We could * have one per-queue but having one simplifies handling of block/unblock * operations. 
*/ static int crp_sleep = 0; static TAILQ_HEAD(cryptop_q ,cryptop) crp_q; /* request queues */ static TAILQ_HEAD(,cryptkop) crp_kq; static struct mtx crypto_q_mtx; #define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx) #define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx) SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0, "In-kernel cryptography"); /* * Taskqueue used to dispatch the crypto requests * that have the CRYPTO_F_ASYNC flag */ static struct taskqueue *crypto_tq; /* * Crypto seq numbers are operated on with modular arithmetic */ #define CRYPTO_SEQ_GT(a,b) ((int)((a)-(b)) > 0) struct crypto_ret_worker { struct mtx crypto_ret_mtx; TAILQ_HEAD(,cryptop) crp_ordered_ret_q; /* ordered callback queue for symetric jobs */ TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queue for symetric jobs */ TAILQ_HEAD(,cryptkop) crp_ret_kq; /* callback queue for asym jobs */ u_int32_t reorder_ops; /* total ordered sym jobs received */ u_int32_t reorder_cur_seq; /* current sym job dispatched */ struct proc *cryptoretproc; }; static struct crypto_ret_worker *crypto_ret_workers = NULL; #define CRYPTO_RETW(i) (&crypto_ret_workers[i]) #define CRYPTO_RETW_ID(w) ((w) - crypto_ret_workers) #define FOREACH_CRYPTO_RETW(w) \ for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w) #define CRYPTO_RETW_LOCK(w) mtx_lock(&w->crypto_ret_mtx) #define CRYPTO_RETW_UNLOCK(w) mtx_unlock(&w->crypto_ret_mtx) #define CRYPTO_RETW_EMPTY(w) \ (TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q)) static int crypto_workers_num = 0; SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN, &crypto_workers_num, 0, "Number of crypto workers used to dispatch crypto jobs"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN, &crypto_workers_num, 0, "Number of crypto workers used to dispatch crypto jobs"); #endif static uma_zone_t cryptop_zone; static uma_zone_t cryptoses_zone; int crypto_userasymcrypto = 1; SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW, &crypto_userasymcrypto, 0, "Enable user-mode access to asymmetric crypto support"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW, &crypto_userasymcrypto, 0, "Enable/disable user-mode access to asymmetric crypto support"); #endif int crypto_devallowsoft = 0; SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW, &crypto_devallowsoft, 0, "Enable use of software crypto by /dev/crypto"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW, &crypto_devallowsoft, 0, "Enable/disable use of software crypto by /dev/crypto"); #endif MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records"); static void crypto_proc(void); static struct proc *cryptoproc; static void crypto_ret_proc(struct crypto_ret_worker *ret_worker); static void crypto_destroy(void); static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint); static int crypto_kinvoke(struct cryptkop *krp); static void crypto_task_invoke(void *ctx, int pending); static void crypto_batch_enqueue(struct cryptop *crp); static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)]; SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW, cryptostats, nitems(cryptostats), "Crypto system statistics"); #define CRYPTOSTAT_INC(stat) do { \ counter_u64_add( \ cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\ 1); \ } while (0) static void cryptostats_init(void *arg __unused) { 
COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK); } SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL); static void cryptostats_fini(void *arg __unused) { COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats)); } SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini, NULL); /* Try to avoid directly exposing the key buffer as a symbol */ static struct keybuf *keybuf; static struct keybuf empty_keybuf = { .kb_nents = 0 }; /* Obtain the key buffer from boot metadata */ static void keybuf_init(void) { caddr_t kmdp; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); keybuf = (struct keybuf *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_KEYBUF); if (keybuf == NULL) keybuf = &empty_keybuf; } /* It'd be nice if we could store these in some kind of secure memory... */ struct keybuf * get_keybuf(void) { return (keybuf); } static struct cryptocap * cap_ref(struct cryptocap *cap) { refcount_acquire(&cap->cc_refs); return (cap); } static void cap_rele(struct cryptocap *cap) { if (refcount_release(&cap->cc_refs) == 0) return; KASSERT(cap->cc_sessions == 0, ("freeing crypto driver with active sessions")); KASSERT(cap->cc_koperations == 0, ("freeing crypto driver with active key operations")); free(cap, M_CRYPTO_DATA); } static int crypto_init(void) { struct crypto_ret_worker *ret_worker; int error; mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table", MTX_DEF|MTX_QUIET); TAILQ_INIT(&crp_q); TAILQ_INIT(&crp_kq); mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF); cryptop_zone = uma_zcreate("cryptop", sizeof(struct cryptop), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); cryptoses_zone = uma_zcreate("crypto_session", sizeof(struct crypto_session), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); crypto_drivers_size = CRYPTO_DRIVERS_INITIAL; crypto_drivers = malloc(crypto_drivers_size * sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus) crypto_workers_num = mp_ncpus; crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &crypto_tq); taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN, "crypto"); error = kproc_create((void (*)(void *)) crypto_proc, NULL, &cryptoproc, 0, 0, "crypto"); if (error) { printf("crypto_init: cannot start crypto thread; error %d", error); goto bad; } crypto_ret_workers = mallocarray(crypto_workers_num, sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO); FOREACH_CRYPTO_RETW(ret_worker) { TAILQ_INIT(&ret_worker->crp_ordered_ret_q); TAILQ_INIT(&ret_worker->crp_ret_q); TAILQ_INIT(&ret_worker->crp_ret_kq); ret_worker->reorder_ops = 0; ret_worker->reorder_cur_seq = 0; mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF); error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker, &ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker)); if (error) { printf("crypto_init: cannot start cryptoret thread; error %d", error); goto bad; } } keybuf_init(); return 0; bad: crypto_destroy(); return error; } /* * Signal a crypto thread to terminate. We use the driver * table lock to synchronize the sleep/wakeups so that we * are sure the threads have terminated before we release * the data structures they use. See crypto_finis below * for the other half of this song-and-dance. 
*/ static void crypto_terminate(struct proc **pp, void *q) { struct proc *p; mtx_assert(&crypto_drivers_mtx, MA_OWNED); p = *pp; *pp = NULL; if (p) { wakeup_one(q); PROC_LOCK(p); /* NB: insure we don't miss wakeup */ CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */ msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0); PROC_UNLOCK(p); CRYPTO_DRIVER_LOCK(); } } static void hmac_init_pad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx, uint8_t padval) { uint8_t hmac_key[HMAC_MAX_BLOCK_LEN]; u_int i; KASSERT(axf->blocksize <= sizeof(hmac_key), ("Invalid HMAC block size %d", axf->blocksize)); /* * If the key is larger than the block size, use the digest of * the key as the key instead. */ memset(hmac_key, 0, sizeof(hmac_key)); if (klen > axf->blocksize) { axf->Init(auth_ctx); axf->Update(auth_ctx, key, klen); axf->Final(hmac_key, auth_ctx); klen = axf->hashsize; } else memcpy(hmac_key, key, klen); for (i = 0; i < axf->blocksize; i++) hmac_key[i] ^= padval; axf->Init(auth_ctx); axf->Update(auth_ctx, hmac_key, axf->blocksize); explicit_bzero(hmac_key, sizeof(hmac_key)); } void hmac_init_ipad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx) { hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL); } void hmac_init_opad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx) { hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL); } static void crypto_destroy(void) { struct crypto_ret_worker *ret_worker; int i; /* * Terminate any crypto threads. */ if (crypto_tq != NULL) taskqueue_drain_all(crypto_tq); CRYPTO_DRIVER_LOCK(); crypto_terminate(&cryptoproc, &crp_q); FOREACH_CRYPTO_RETW(ret_worker) crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q); CRYPTO_DRIVER_UNLOCK(); /* XXX flush queues??? */ /* * Reclaim dynamically allocated resources. 
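/*
 * Illustrative sketch (editorial addition, not part of this change): how a
 * driver can use hmac_init_ipad()/hmac_init_opad() above at session-setup time
 * to precompute the two partial HMAC states, so per-request work only resumes
 * from the stored contexts.  The state structure is hypothetical; the usual
 * kernel headers are assumed.
 */
#include <sys/param.h>
#include <sys/errno.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>

struct mydrv_hmac_state {
        struct auth_hash *axf;
        union authctx     ictx;         /* state after absorbing K ^ ipad */
        union authctx     octx;         /* state after absorbing K ^ opad */
};

static int
mydrv_setup_hmac(struct mydrv_hmac_state *s,
    const struct crypto_session_params *csp)
{

        s->axf = crypto_auth_hash(csp);
        if (s->axf == NULL || csp->csp_auth_key == NULL)
                return (EINVAL);
        hmac_init_ipad(s->axf, csp->csp_auth_key, csp->csp_auth_klen,
            &s->ictx);
        hmac_init_opad(s->axf, csp->csp_auth_key, csp->csp_auth_klen,
            &s->octx);
        return (0);
}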
*/ for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] != NULL) cap_rele(crypto_drivers[i]); } free(crypto_drivers, M_CRYPTO_DATA); if (cryptoses_zone != NULL) uma_zdestroy(cryptoses_zone); if (cryptop_zone != NULL) uma_zdestroy(cryptop_zone); mtx_destroy(&crypto_q_mtx); FOREACH_CRYPTO_RETW(ret_worker) mtx_destroy(&ret_worker->crypto_ret_mtx); free(crypto_ret_workers, M_CRYPTO_DATA); if (crypto_tq != NULL) taskqueue_free(crypto_tq); mtx_destroy(&crypto_drivers_mtx); } uint32_t crypto_ses2hid(crypto_session_t crypto_session) { return (crypto_session->cap->cc_hid); } uint32_t crypto_ses2caps(crypto_session_t crypto_session) { return (crypto_session->cap->cc_flags & 0xff000000); } void * crypto_get_driver_session(crypto_session_t crypto_session) { return (crypto_session->softc); } const struct crypto_session_params * crypto_get_params(crypto_session_t crypto_session) { return (&crypto_session->csp); } struct auth_hash * crypto_auth_hash(const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: return (&auth_hash_hmac_sha1); case CRYPTO_SHA2_224_HMAC: return (&auth_hash_hmac_sha2_224); case CRYPTO_SHA2_256_HMAC: return (&auth_hash_hmac_sha2_256); case CRYPTO_SHA2_384_HMAC: return (&auth_hash_hmac_sha2_384); case CRYPTO_SHA2_512_HMAC: return (&auth_hash_hmac_sha2_512); case CRYPTO_NULL_HMAC: return (&auth_hash_null); case CRYPTO_RIPEMD160_HMAC: return (&auth_hash_hmac_ripemd_160); case CRYPTO_SHA1: return (&auth_hash_sha1); case CRYPTO_SHA2_224: return (&auth_hash_sha2_224); case CRYPTO_SHA2_256: return (&auth_hash_sha2_256); case CRYPTO_SHA2_384: return (&auth_hash_sha2_384); case CRYPTO_SHA2_512: return (&auth_hash_sha2_512); case CRYPTO_AES_NIST_GMAC: switch (csp->csp_auth_klen) { case 128 / 8: return (&auth_hash_nist_gmac_aes_128); case 192 / 8: return (&auth_hash_nist_gmac_aes_192); case 256 / 8: return (&auth_hash_nist_gmac_aes_256); default: return (NULL); } case CRYPTO_BLAKE2B: return (&auth_hash_blake2b); case CRYPTO_BLAKE2S: return (&auth_hash_blake2s); case CRYPTO_POLY1305: return (&auth_hash_poly1305); case CRYPTO_AES_CCM_CBC_MAC: switch (csp->csp_auth_klen) { case 128 / 8: return (&auth_hash_ccm_cbc_mac_128); case 192 / 8: return (&auth_hash_ccm_cbc_mac_192); case 256 / 8: return (&auth_hash_ccm_cbc_mac_256); default: return (NULL); } default: return (NULL); } } struct enc_xform * crypto_cipher(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_RIJNDAEL128_CBC: return (&enc_xform_rijndael128); case CRYPTO_AES_XTS: return (&enc_xform_aes_xts); case CRYPTO_AES_ICM: return (&enc_xform_aes_icm); case CRYPTO_AES_NIST_GCM_16: return (&enc_xform_aes_nist_gcm); case CRYPTO_CAMELLIA_CBC: return (&enc_xform_camellia); case CRYPTO_NULL_CBC: return (&enc_xform_null); case CRYPTO_CHACHA20: return (&enc_xform_chacha20); case CRYPTO_AES_CCM_16: return (&enc_xform_ccm); default: return (NULL); } } static struct cryptocap * crypto_checkdriver(u_int32_t hid) { return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]); } /* * Select a driver for a new session that supports the specified * algorithms and, optionally, is constrained according to the flags. 
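/*
 * Illustrative sketch (editorial addition, not part of this change): using the
 * crypto_auth_hash() lookup above to size a session's MAC output.  A zero
 * csp_auth_mlen requests the transform's full digest length.
 */
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>

static int
mydrv_mac_len(const struct crypto_session_params *csp)
{
        struct auth_hash *axf;

        axf = crypto_auth_hash(csp);
        if (axf == NULL)
                return (0);
        return (csp->csp_auth_mlen != 0 ? csp->csp_auth_mlen : axf->hashsize);
}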
*/ static struct cryptocap * crypto_select_driver(const struct crypto_session_params *csp, int flags) { struct cryptocap *cap, *best; int best_match, error, hid; CRYPTO_DRIVER_ASSERT(); best = NULL; for (hid = 0; hid < crypto_drivers_size; hid++) { /* * If there is no driver for this slot, or the driver * is not appropriate (hardware or software based on * match), then skip. */ cap = crypto_drivers[hid]; if (cap == NULL || (cap->cc_flags & flags) == 0) continue; error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp); if (error >= 0) continue; /* * Use the driver with the highest probe value. * Hardware drivers use a higher probe value than * software. In case of a tie, prefer the driver with * the fewest active sessions. */ if (best == NULL || error > best_match || (error == best_match && cap->cc_sessions < best->cc_sessions)) { best = cap; best_match = error; } } return best; } static enum alg_type { ALG_NONE = 0, ALG_CIPHER, ALG_DIGEST, ALG_KEYED_DIGEST, ALG_COMPRESSION, ALG_AEAD } alg_types[] = { [CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_CBC] = ALG_CIPHER, [CRYPTO_SHA1] = ALG_DIGEST, [CRYPTO_NULL_HMAC] = ALG_DIGEST, [CRYPTO_NULL_CBC] = ALG_CIPHER, [CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION, [CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_CAMELLIA_CBC] = ALG_CIPHER, [CRYPTO_AES_XTS] = ALG_CIPHER, [CRYPTO_AES_ICM] = ALG_CIPHER, [CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_NIST_GCM_16] = ALG_AEAD, [CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST, [CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST, [CRYPTO_CHACHA20] = ALG_CIPHER, [CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_RIPEMD160] = ALG_DIGEST, [CRYPTO_SHA2_224] = ALG_DIGEST, [CRYPTO_SHA2_256] = ALG_DIGEST, [CRYPTO_SHA2_384] = ALG_DIGEST, [CRYPTO_SHA2_512] = ALG_DIGEST, [CRYPTO_POLY1305] = ALG_KEYED_DIGEST, [CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_CCM_16] = ALG_AEAD, }; static enum alg_type alg_type(int alg) { if (alg < nitems(alg_types)) return (alg_types[alg]); return (ALG_NONE); } static bool alg_is_compression(int alg) { return (alg_type(alg) == ALG_COMPRESSION); } static bool alg_is_cipher(int alg) { return (alg_type(alg) == ALG_CIPHER); } static bool alg_is_digest(int alg) { return (alg_type(alg) == ALG_DIGEST || alg_type(alg) == ALG_KEYED_DIGEST); } static bool alg_is_keyed_digest(int alg) { return (alg_type(alg) == ALG_KEYED_DIGEST); } static bool alg_is_aead(int alg) { return (alg_type(alg) == ALG_AEAD); } /* Various sanity checks on crypto session parameters. */ static bool check_csp(const struct crypto_session_params *csp) { struct auth_hash *axf; /* Mode-independent checks. 
*/ if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) != 0) return (false); if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 || csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0) return (false); if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0) return (false); if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0) return (false); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: if (!alg_is_compression(csp->csp_cipher_alg)) return (false); if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 || csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || csp->csp_auth_mlen != 0) return (false); break; case CSP_MODE_CIPHER: if (!alg_is_cipher(csp->csp_cipher_alg)) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0) return (false); } if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || csp->csp_auth_mlen != 0) return (false); break; case CSP_MODE_DIGEST: if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); /* IV is optional for digests (e.g. GMAC). */ if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (!alg_is_digest(csp->csp_auth_alg)) return (false); /* Key is optional for BLAKE2 digests. */ if (csp->csp_auth_alg == CRYPTO_BLAKE2B || csp->csp_auth_alg == CRYPTO_BLAKE2S) ; else if (alg_is_keyed_digest(csp->csp_auth_alg)) { if (csp->csp_auth_klen == 0) return (false); } else { if (csp->csp_auth_klen != 0) return (false); } if (csp->csp_auth_mlen != 0) { axf = crypto_auth_hash(csp); if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) return (false); } break; case CSP_MODE_AEAD: if (!alg_is_aead(csp->csp_cipher_alg)) return (false); if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0 || csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0) return (false); /* * XXX: Would be nice to have a better way to get this * value. */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: if (csp->csp_auth_mlen > 16) return (false); break; } break; case CSP_MODE_ETA: if (!alg_is_cipher(csp->csp_cipher_alg)) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0) return (false); } if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (!alg_is_digest(csp->csp_auth_alg)) return (false); /* Key is optional for BLAKE2 digests. */ if (csp->csp_auth_alg == CRYPTO_BLAKE2B || csp->csp_auth_alg == CRYPTO_BLAKE2S) ; else if (alg_is_keyed_digest(csp->csp_auth_alg)) { if (csp->csp_auth_klen == 0) return (false); } else { if (csp->csp_auth_klen != 0) return (false); } if (csp->csp_auth_mlen != 0) { axf = crypto_auth_hash(csp); if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) return (false); } break; default: return (false); } return (true); } /* * Delete a session after it has been detached from its driver. 
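/*
 * Illustrative sketch (editorial addition, not part of this change): a
 * crypto_session_params initialization that satisfies the check_csp() rules
 * above for an AES-128-GCM AEAD session that also allows separate output
 * buffers.  The key pointer and the HARDWARE|SOFTWARE constraint are example
 * choices, not requirements.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <opencrypto/cryptodev.h>

static int
example_gcm_newsession(crypto_session_t *csesp, const void *key)
{
        struct crypto_session_params csp;

        memset(&csp, 0, sizeof(csp));
        csp.csp_mode = CSP_MODE_AEAD;
        csp.csp_flags = CSP_F_SEPARATE_OUTPUT;
        csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
        csp.csp_cipher_key = key;
        csp.csp_cipher_klen = 16;       /* AES-128 */
        csp.csp_ivlen = 12;             /* 96-bit GCM nonce */
        /* csp_auth_mlen of 0 asks for the full 16-byte tag. */

        return (crypto_newsession(csesp, &csp,
            CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE));
}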
*/ static void crypto_deletesession(crypto_session_t cses) { struct cryptocap *cap; cap = cses->cap; zfree(cses->softc, M_CRYPTO_DATA); uma_zfree(cryptoses_zone, cses); CRYPTO_DRIVER_LOCK(); cap->cc_sessions--; if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) wakeup(cap); CRYPTO_DRIVER_UNLOCK(); cap_rele(cap); } /* * Create a new session. The crid argument specifies a crypto * driver to use or constraints on a driver to select (hardware * only, software only, either). Whatever driver is selected * must be capable of the requested crypto algorithms. */ int crypto_newsession(crypto_session_t *cses, const struct crypto_session_params *csp, int crid) { crypto_session_t res; struct cryptocap *cap; int err; if (!check_csp(csp)) return (EINVAL); res = NULL; CRYPTO_DRIVER_LOCK(); if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { /* * Use specified driver; verify it is capable. */ cap = crypto_checkdriver(crid); if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0) cap = NULL; } else { /* * No requested driver; select based on crid flags. */ cap = crypto_select_driver(csp, crid); } if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); CRYPTDEB("no driver"); return (EOPNOTSUPP); } cap_ref(cap); cap->cc_sessions++; CRYPTO_DRIVER_UNLOCK(); res = uma_zalloc(cryptoses_zone, M_WAITOK | M_ZERO); res->cap = cap; res->softc = malloc(cap->cc_session_size, M_CRYPTO_DATA, M_WAITOK | M_ZERO); res->csp = *csp; /* Call the driver initialization routine. */ err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp); if (err != 0) { CRYPTDEB("dev newsession failed: %d", err); crypto_deletesession(res); return (err); } *cses = res; return (0); } /* * Delete an existing session (or a reserved session on an unregistered * driver). */ void crypto_freesession(crypto_session_t cses) { struct cryptocap *cap; if (cses == NULL) return; cap = cses->cap; /* Call the driver cleanup routine, if available. */ CRYPTODEV_FREESESSION(cap->cc_dev, cses); crypto_deletesession(cses); } /* * Return a new driver id. Registers a driver with the system so that * it can be probed by subsequent sessions. */ int32_t crypto_get_driverid(device_t dev, size_t sessionsize, int flags) { struct cryptocap *cap, **newdrv; int i; if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { device_printf(dev, "no flags specified when registering driver\n"); return -1; } cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); cap->cc_dev = dev; cap->cc_session_size = sessionsize; cap->cc_flags = flags; refcount_init(&cap->cc_refs, 1); CRYPTO_DRIVER_LOCK(); for (;;) { for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] == NULL) break; } if (i < crypto_drivers_size) break; /* Out of entries, allocate some more. */ if (2 * crypto_drivers_size <= crypto_drivers_size) { CRYPTO_DRIVER_UNLOCK(); printf("crypto: driver count wraparound!\n"); cap_rele(cap); return (-1); } CRYPTO_DRIVER_UNLOCK(); newdrv = malloc(2 * crypto_drivers_size * sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO); CRYPTO_DRIVER_LOCK(); memcpy(newdrv, crypto_drivers, crypto_drivers_size * sizeof(*crypto_drivers)); crypto_drivers_size *= 2; free(crypto_drivers, M_CRYPTO_DATA); crypto_drivers = newdrv; } cap->cc_hid = i; crypto_drivers[i] = cap; CRYPTO_DRIVER_UNLOCK(); if (bootverbose) printf("crypto: assign %s driver id %u, flags 0x%x\n", device_get_nameunit(dev), i, flags); return i; } /* * Lookup a driver by name. We match against the full device * name and unit, and against just the name. 
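/*
 * Illustrative sketch (editorial addition, not part of this change): the
 * attach-time registration that pairs with crypto_get_driverid() above.  The
 * framework allocates sizeof(struct mydrv_session) bytes of per-session state
 * on the driver's behalf; the structure and names are hypothetical.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <opencrypto/cryptodev.h>

struct mydrv_session {
        int     placeholder;    /* per-session driver state would live here */
};

static int
mydrv_attach_crypto(device_t dev, int32_t *cidp)
{
        int32_t cid;

        cid = crypto_get_driverid(dev, sizeof(struct mydrv_session),
            CRYPTOCAP_F_HARDWARE);
        if (cid < 0)
                return (ENXIO);
        *cidp = cid;
        return (0);
}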
The latter gives * us a simple widlcarding by device name. On success return the * driver/hardware identifier; otherwise return -1. */ int crypto_find_driver(const char *match) { struct cryptocap *cap; int i, len = strlen(match); CRYPTO_DRIVER_LOCK(); for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] == NULL) continue; cap = crypto_drivers[i]; if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 || strncmp(match, device_get_name(cap->cc_dev), len) == 0) { CRYPTO_DRIVER_UNLOCK(); return (i); } } CRYPTO_DRIVER_UNLOCK(); return (-1); } /* * Return the device_t for the specified driver or NULL * if the driver identifier is invalid. */ device_t crypto_find_device_byhid(int hid) { struct cryptocap *cap; device_t dev; dev = NULL; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(hid); if (cap != NULL) dev = cap->cc_dev; CRYPTO_DRIVER_UNLOCK(); return (dev); } /* * Return the device/driver capabilities. */ int crypto_getcaps(int hid) { struct cryptocap *cap; int flags; flags = 0; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(hid); if (cap != NULL) flags = cap->cc_flags; CRYPTO_DRIVER_UNLOCK(); return (flags); } /* * Register support for a key-related algorithm. This routine * is called once for each algorithm supported a driver. */ int crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags) { struct cryptocap *cap; int err; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL && (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) { /* * XXX Do some performance testing to determine placing. * XXX We probably need an auxiliary data structure that * XXX describes relative performances. */ cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; if (bootverbose) printf("crypto: %s registers key alg %u flags %u\n" , device_get_nameunit(cap->cc_dev) , kalg , flags ); err = 0; } else err = EINVAL; CRYPTO_DRIVER_UNLOCK(); return err; } /* * Unregister all algorithms associated with a crypto driver. * If there are pending sessions using it, leave enough information * around so that subsequent calls using those sessions will * correctly detect the driver has been unregistered and reroute * requests. */ int crypto_unregister_all(u_int32_t driverid) { struct cryptocap *cap; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); return (EINVAL); } cap->cc_flags |= CRYPTOCAP_F_CLEANUP; crypto_drivers[driverid] = NULL; /* * XXX: This doesn't do anything to kick sessions that * have no pending operations. */ while (cap->cc_sessions != 0 || cap->cc_koperations != 0) mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0); CRYPTO_DRIVER_UNLOCK(); cap_rele(cap); return (0); } /* * Clear blockage on a driver. The what parameter indicates whether * the driver is now ready for cryptop's and/or cryptokop's. 
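/*
 * Illustrative sketch (editorial addition, not part of this change): the
 * teardown and unblock halves of the driver life cycle shown above.
 * crypto_unregister_all() sleeps until pending sessions and key operations
 * drain; crypto_unblock() is what a driver calls after a prior ERESTART once
 * it has resources again.
 */
#include <opencrypto/cryptodev.h>

static int
mydrv_detach_crypto(int32_t cid)
{

        return (crypto_unregister_all(cid));
}

static void
mydrv_resources_available(int32_t cid)
{

        /* Let queued symmetric requests flow to this driver again. */
        crypto_unblock(cid, CRYPTO_SYMQ);
}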
*/ int crypto_unblock(u_int32_t driverid, int what) { struct cryptocap *cap; int err; CRYPTO_Q_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL) { if (what & CRYPTO_SYMQ) cap->cc_qblocked = 0; if (what & CRYPTO_ASYMQ) cap->cc_kqblocked = 0; if (crp_sleep) wakeup_one(&crp_q); err = 0; } else err = EINVAL; CRYPTO_Q_UNLOCK(); return err; } size_t crypto_buffer_len(struct crypto_buffer *cb) { switch (cb->cb_type) { case CRYPTO_BUF_CONTIG: return (cb->cb_buf_len); case CRYPTO_BUF_MBUF: if (cb->cb_mbuf->m_flags & M_PKTHDR) return (cb->cb_mbuf->m_pkthdr.len); return (m_length(cb->cb_mbuf, NULL)); + case CRYPTO_BUF_VMPAGE: + return (cb->cb_vm_page_len); case CRYPTO_BUF_UIO: return (cb->cb_uio->uio_resid); default: return (0); } } #ifdef INVARIANTS /* Various sanity checks on crypto requests. */ static void cb_sanity(struct crypto_buffer *cb, const char *name) { KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST, ("incoming crp with invalid %s buffer type", name)); - if (cb->cb_type == CRYPTO_BUF_CONTIG) + switch (cb->cb_type) { + case CRYPTO_BUF_CONTIG: KASSERT(cb->cb_buf_len >= 0, ("incoming crp with -ve %s buffer length", name)); + break; + case CRYPTO_BUF_VMPAGE: + KASSERT(CRYPTO_HAS_VMPAGE, + ("incoming crp uses dmap on supported arch")); + KASSERT(cb->cb_vm_page_len >= 0, + ("incoming crp with -ve %s buffer length", name)); + KASSERT(cb->cb_vm_page_offset >= 0, + ("incoming crp with -ve %s buffer offset", name)); + KASSERT(cb->cb_vm_page_offset < PAGE_SIZE, + ("incoming crp with %s buffer offset greater than page size" + , name)); + break; + default: + break; + } } static void crp_sanity(struct cryptop *crp) { struct crypto_session_params *csp; struct crypto_buffer *out; size_t ilen, len, olen; KASSERT(crp->crp_session != NULL, ("incoming crp without a session")); KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE && crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST, ("incoming crp with invalid output buffer type")); KASSERT(crp->crp_etype == 0, ("incoming crp with error")); KASSERT(!(crp->crp_flags & CRYPTO_F_DONE), ("incoming crp already done")); csp = &crp->crp_session->csp; cb_sanity(&crp->crp_buf, "input"); ilen = crypto_buffer_len(&crp->crp_buf); olen = ilen; out = NULL; if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) { if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) { cb_sanity(&crp->crp_obuf, "output"); out = &crp->crp_obuf; olen = crypto_buffer_len(out); } } else KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE, ("incoming crp with separate output buffer " "but no session support")); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS || crp->crp_op == CRYPTO_OP_DECOMPRESS, ("invalid compression op %x", crp->crp_op)); break; case CSP_MODE_CIPHER: KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT || crp->crp_op == CRYPTO_OP_DECRYPT, ("invalid cipher op %x", crp->crp_op)); break; case CSP_MODE_DIGEST: KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST || crp->crp_op == CRYPTO_OP_VERIFY_DIGEST, ("invalid digest op %x", crp->crp_op)); break; case CSP_MODE_AEAD: KASSERT(crp->crp_op == (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || crp->crp_op == (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), ("invalid AEAD op %x", crp->crp_op)); if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, ("GCM without a separate IV")); if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16) KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, ("CCM without a separate IV")); break; case CSP_MODE_ETA: KASSERT(crp->crp_op == 
(CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || crp->crp_op == (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), ("invalid ETA op %x", crp->crp_op)); break; } if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { if (crp->crp_aad == NULL) { KASSERT(crp->crp_aad_start == 0 || crp->crp_aad_start < ilen, ("invalid AAD start")); KASSERT(crp->crp_aad_length != 0 || crp->crp_aad_start == 0, ("AAD with zero length and non-zero start")); KASSERT(crp->crp_aad_length == 0 || crp->crp_aad_start + crp->crp_aad_length <= ilen, ("AAD outside input length")); } else { KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD, ("session doesn't support separate AAD buffer")); KASSERT(crp->crp_aad_start == 0, ("separate AAD buffer with non-zero AAD start")); KASSERT(crp->crp_aad_length != 0, ("separate AAD buffer with zero length")); } } else { KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 && crp->crp_aad_length == 0, ("AAD region in request not supporting AAD")); } if (csp->csp_ivlen == 0) { KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0, ("IV_SEPARATE set when IV isn't used")); KASSERT(crp->crp_iv_start == 0, ("crp_iv_start set when IV isn't used")); } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) { KASSERT(crp->crp_iv_start == 0, ("IV_SEPARATE used with non-zero IV start")); } else { KASSERT(crp->crp_iv_start < ilen, ("invalid IV start")); KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen, ("IV outside buffer length")); } /* XXX: payload_start of 0 should always be < ilen? */ KASSERT(crp->crp_payload_start == 0 || crp->crp_payload_start < ilen, ("invalid payload start")); KASSERT(crp->crp_payload_start + crp->crp_payload_length <= ilen, ("payload outside input buffer")); if (out == NULL) { KASSERT(crp->crp_payload_output_start == 0, ("payload output start non-zero without output buffer")); } else { KASSERT(crp->crp_payload_output_start < olen, ("invalid payload output start")); KASSERT(crp->crp_payload_output_start + crp->crp_payload_length <= olen, ("payload outside output buffer")); } if (csp->csp_mode == CSP_MODE_DIGEST || csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) len = ilen; else len = olen; KASSERT(crp->crp_digest_start == 0 || crp->crp_digest_start < len, ("invalid digest start")); /* XXX: For the mlen == 0 case this check isn't perfect. */ KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len, ("digest outside buffer")); } else { KASSERT(crp->crp_digest_start == 0, ("non-zero digest start for request without a digest")); } if (csp->csp_cipher_klen != 0) KASSERT(csp->csp_cipher_key != NULL || crp->crp_cipher_key != NULL, ("cipher request without a key")); if (csp->csp_auth_klen != 0) KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL, ("auth request without a key")); KASSERT(crp->crp_callback != NULL, ("incoming crp without callback")); } #endif /* * Add a crypto request to a queue, to be processed by the kernel thread. 
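/*
 * Illustrative sketch (editorial addition, not part of this change): a request
 * layout that passes the crp_sanity() checks above for an AES-GCM AEAD session
 * (AAD, then payload, then a 16-byte tag, all in one in-place buffer, with the
 * 12-byte nonce passed separately).  The crypto_use_buf() request helper and a
 * caller-supplied callback are assumed; the packet layout is hypothetical.
 */
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <opencrypto/cryptodev.h>

static int
example_gcm_seal(crypto_session_t cses, void *buf, int aad_len,
    int payload_len, const char *nonce12, int (*done)(struct cryptop *),
    void *arg)
{
        struct cryptop *crp;

        crp = crypto_getreq(cses, M_WAITOK);
        crypto_use_buf(crp, buf, aad_len + payload_len + 16);

        crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
        crp->crp_flags = CRYPTO_F_IV_SEPARATE | CRYPTO_F_CBIMM;
        memcpy(crp->crp_iv, nonce12, 12);

        crp->crp_aad_start = 0;
        crp->crp_aad_length = aad_len;
        crp->crp_payload_start = aad_len;
        crp->crp_payload_length = payload_len;
        crp->crp_digest_start = aad_len + payload_len;

        crp->crp_callback = done;
        crp->crp_opaque = arg;
        return (crypto_dispatch(crp));
}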
*/ int crypto_dispatch(struct cryptop *crp) { struct cryptocap *cap; int result; #ifdef INVARIANTS crp_sanity(crp); #endif CRYPTOSTAT_INC(cs_ops); crp->crp_retw_id = ((uintptr_t)crp->crp_session) % crypto_workers_num; if (CRYPTOP_ASYNC(crp)) { if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) { struct crypto_ret_worker *ret_worker; ret_worker = CRYPTO_RETW(crp->crp_retw_id); CRYPTO_RETW_LOCK(ret_worker); crp->crp_seq = ret_worker->reorder_ops++; CRYPTO_RETW_UNLOCK(ret_worker); } TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp); taskqueue_enqueue(crypto_tq, &crp->crp_task); return (0); } if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) { /* * Caller marked the request to be processed * immediately; dispatch it directly to the * driver unless the driver is currently blocked. */ cap = crp->crp_session->cap; if (!cap->cc_qblocked) { result = crypto_invoke(cap, crp, 0); if (result != ERESTART) return (result); /* * The driver ran out of resources, put the request on * the queue. */ } } crypto_batch_enqueue(crp); return 0; } void crypto_batch_enqueue(struct cryptop *crp) { CRYPTO_Q_LOCK(); TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); if (crp_sleep) wakeup_one(&crp_q); CRYPTO_Q_UNLOCK(); } /* * Add an asymetric crypto request to a queue, * to be processed by the kernel thread. */ int crypto_kdispatch(struct cryptkop *krp) { int error; CRYPTOSTAT_INC(cs_kops); krp->krp_cap = NULL; error = crypto_kinvoke(krp); if (error == ERESTART) { CRYPTO_Q_LOCK(); TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); if (crp_sleep) wakeup_one(&crp_q); CRYPTO_Q_UNLOCK(); error = 0; } return error; } /* * Verify a driver is suitable for the specified operation. */ static __inline int kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp) { return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0; } /* * Select a driver for an asym operation. The driver must * support the necessary algorithm. The caller can constrain * which device is selected with the flags parameter. The * algorithm we use here is pretty stupid; just use the first * driver that supports the algorithms we need. If there are * multiple suitable drivers we choose the driver with the * fewest active operations. We prefer hardware-backed * drivers to software ones when either may be used. */ static struct cryptocap * crypto_select_kdriver(const struct cryptkop *krp, int flags) { struct cryptocap *cap, *best; int match, hid; CRYPTO_DRIVER_ASSERT(); /* * Look first for hardware crypto devices if permitted. */ if (flags & CRYPTOCAP_F_HARDWARE) match = CRYPTOCAP_F_HARDWARE; else match = CRYPTOCAP_F_SOFTWARE; best = NULL; again: for (hid = 0; hid < crypto_drivers_size; hid++) { /* * If there is no driver for this slot, or the driver * is not appropriate (hardware or software based on * match), then skip. */ cap = crypto_drivers[hid]; if (cap->cc_dev == NULL || (cap->cc_flags & match) == 0) continue; /* verify all the algorithms are supported. */ if (kdriver_suitable(cap, krp)) { if (best == NULL || cap->cc_koperations < best->cc_koperations) best = cap; } } if (best != NULL) return best; if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { /* sort of an Algol 68-style for loop */ match = CRYPTOCAP_F_SOFTWARE; goto again; } return best; } /* * Choose a driver for an asymmetric crypto request. */ static struct cryptocap * crypto_lookup_kdriver(struct cryptkop *krp) { struct cryptocap *cap; uint32_t crid; /* If this request is requeued, it might already have a driver. 
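/*
 * Illustrative sketch (editorial addition, not part of this change): the
 * consumer callback that crypto_dispatch() above eventually triggers through
 * crypto_done().  EAGAIN means the session was migrated to another driver (see
 * crypto_invoke() below); the usual response is to clear the error and DONE
 * flag and resubmit.  The wakeup-based completion record is hypothetical.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <opencrypto/cryptodev.h>

struct example_done {
        int     error;
        bool    complete;
};

static int
example_crp_done(struct cryptop *crp)
{
        struct example_done *d = crp->crp_opaque;

        if (crp->crp_etype == EAGAIN) {
                crp->crp_etype = 0;
                crp->crp_flags &= ~CRYPTO_F_DONE;
                return (crypto_dispatch(crp));
        }

        d->error = crp->crp_etype;
        d->complete = true;
        wakeup(d);
        return (0);
}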
*/ cap = krp->krp_cap; if (cap != NULL) return (cap); /* Use krp_crid to choose a driver. */ crid = krp->krp_crid; if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { cap = crypto_checkdriver(crid); if (cap != NULL) { /* * Driver present, it must support the * necessary algorithm and, if s/w drivers are * excluded, it must be registered as * hardware-backed. */ if (!kdriver_suitable(cap, krp) || (!crypto_devallowsoft && (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0)) cap = NULL; } } else { /* * No requested driver; select based on crid flags. */ if (!crypto_devallowsoft) /* NB: disallow s/w drivers */ crid &= ~CRYPTOCAP_F_SOFTWARE; cap = crypto_select_kdriver(krp, crid); } if (cap != NULL) { krp->krp_cap = cap_ref(cap); krp->krp_hid = cap->cc_hid; } return (cap); } /* * Dispatch an asymmetric crypto request. */ static int crypto_kinvoke(struct cryptkop *krp) { struct cryptocap *cap = NULL; int error; KASSERT(krp != NULL, ("%s: krp == NULL", __func__)); KASSERT(krp->krp_callback != NULL, ("%s: krp->crp_callback == NULL", __func__)); CRYPTO_DRIVER_LOCK(); cap = crypto_lookup_kdriver(krp); if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); krp->krp_status = ENODEV; crypto_kdone(krp); return (0); } /* * If the device is blocked, return ERESTART to requeue it. */ if (cap->cc_kqblocked) { /* * XXX: Previously this set krp_status to ERESTART and * invoked crypto_kdone but the caller would still * requeue it. */ CRYPTO_DRIVER_UNLOCK(); return (ERESTART); } cap->cc_koperations++; CRYPTO_DRIVER_UNLOCK(); error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0); if (error == ERESTART) { CRYPTO_DRIVER_LOCK(); cap->cc_koperations--; CRYPTO_DRIVER_UNLOCK(); return (error); } KASSERT(error == 0, ("error %d returned from crypto_kprocess", error)); return (0); } static void crypto_task_invoke(void *ctx, int pending) { struct cryptocap *cap; struct cryptop *crp; int result; crp = (struct cryptop *)ctx; cap = crp->crp_session->cap; result = crypto_invoke(cap, crp, 0); if (result == ERESTART) crypto_batch_enqueue(crp); } /* * Dispatch a crypto request to the appropriate crypto devices. */ static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint) { KASSERT(crp != NULL, ("%s: crp == NULL", __func__)); KASSERT(crp->crp_callback != NULL, ("%s: crp->crp_callback == NULL", __func__)); KASSERT(crp->crp_session != NULL, ("%s: crp->crp_session == NULL", __func__)); if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { struct crypto_session_params csp; crypto_session_t nses; /* * Driver has unregistered; migrate the session and return * an error to the caller so they'll resubmit the op. * * XXX: What if there are more already queued requests for this * session? * * XXX: Real solution is to make sessions refcounted * and force callers to hold a reference when * assigning to crp_session. Could maybe change * crypto_getreq to accept a session pointer to make * that work. Alternatively, we could abandon the * notion of rewriting crp_session in requests forcing * the caller to deal with allocating a new session. * Perhaps provide a method to allow a crp's session to * be swapped that callers could use. */ csp = crp->crp_session->csp; crypto_freesession(crp->crp_session); /* * XXX: Key pointers may no longer be valid. If we * really want to support this we need to define the * KPI such that 'csp' is required to be valid for the * duration of a session by the caller perhaps. * * XXX: If the keys have been changed this will reuse * the old keys. 
This probably suggests making * rekeying more explicit and updating the key * pointers in 'csp' when the keys change. */ if (crypto_newsession(&nses, &csp, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0) crp->crp_session = nses; crp->crp_etype = EAGAIN; crypto_done(crp); return 0; } else { /* * Invoke the driver to process the request. */ return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint); } } void crypto_destroyreq(struct cryptop *crp) { #ifdef DIAGNOSTIC { struct cryptop *crp2; struct crypto_ret_worker *ret_worker; CRYPTO_Q_LOCK(); TAILQ_FOREACH(crp2, &crp_q, crp_next) { KASSERT(crp2 != crp, ("Freeing cryptop from the crypto queue (%p).", crp)); } CRYPTO_Q_UNLOCK(); FOREACH_CRYPTO_RETW(ret_worker) { CRYPTO_RETW_LOCK(ret_worker); TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) { KASSERT(crp2 != crp, ("Freeing cryptop from the return queue (%p).", crp)); } CRYPTO_RETW_UNLOCK(ret_worker); } } #endif } void crypto_freereq(struct cryptop *crp) { if (crp == NULL) return; crypto_destroyreq(crp); uma_zfree(cryptop_zone, crp); } static void _crypto_initreq(struct cryptop *crp, crypto_session_t cses) { crp->crp_session = cses; } void crypto_initreq(struct cryptop *crp, crypto_session_t cses) { memset(crp, 0, sizeof(*crp)); _crypto_initreq(crp, cses); } struct cryptop * crypto_getreq(crypto_session_t cses, int how) { struct cryptop *crp; MPASS(how == M_WAITOK || how == M_NOWAIT); crp = uma_zalloc(cryptop_zone, how | M_ZERO); if (crp != NULL) _crypto_initreq(crp, cses); return (crp); } /* * Invoke the callback on behalf of the driver. */ void crypto_done(struct cryptop *crp) { KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0, ("crypto_done: op already done, flags 0x%x", crp->crp_flags)); crp->crp_flags |= CRYPTO_F_DONE; if (crp->crp_etype != 0) CRYPTOSTAT_INC(cs_errs); /* * CBIMM means unconditionally do the callback immediately; * CBIFSYNC means do the callback immediately only if the * operation was done synchronously. Both are used to avoid * doing extraneous context switches; the latter is mostly * used with the software crypto driver. */ if (!CRYPTOP_ASYNC_KEEPORDER(crp) && ((crp->crp_flags & CRYPTO_F_CBIMM) || ((crp->crp_flags & CRYPTO_F_CBIFSYNC) && (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) { /* * Do the callback directly. This is ok when the * callback routine does very little (e.g. the * /dev/crypto callback method just does a wakeup). */ crp->crp_callback(crp); } else { struct crypto_ret_worker *ret_worker; bool wake; ret_worker = CRYPTO_RETW(crp->crp_retw_id); wake = false; /* * Normal case; queue the callback for the thread. */ CRYPTO_RETW_LOCK(ret_worker); if (CRYPTOP_ASYNC_KEEPORDER(crp)) { struct cryptop *tmp; TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q, cryptop_q, crp_next) { if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) { TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q, tmp, crp, crp_next); break; } } if (tmp == NULL) { TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q, crp, crp_next); } if (crp->crp_seq == ret_worker->reorder_cur_seq) wake = true; } else { if (CRYPTO_RETW_EMPTY(ret_worker)) wake = true; TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next); } if (wake) wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ CRYPTO_RETW_UNLOCK(ret_worker); } } /* * Invoke the callback on behalf of the driver. 
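 * This is the asymmetric (cryptkop) counterpart of crypto_done();
 * completed key operations are always queued to return worker 0.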
*/ void crypto_kdone(struct cryptkop *krp) { struct crypto_ret_worker *ret_worker; struct cryptocap *cap; if (krp->krp_status != 0) CRYPTOSTAT_INC(cs_kerrs); CRYPTO_DRIVER_LOCK(); cap = krp->krp_cap; KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0")); cap->cc_koperations--; if (cap->cc_koperations == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) wakeup(cap); CRYPTO_DRIVER_UNLOCK(); krp->krp_cap = NULL; cap_rele(cap); ret_worker = CRYPTO_RETW(0); CRYPTO_RETW_LOCK(ret_worker); if (CRYPTO_RETW_EMPTY(ret_worker)) wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next); CRYPTO_RETW_UNLOCK(ret_worker); } int crypto_getfeat(int *featp) { int hid, kalg, feat = 0; CRYPTO_DRIVER_LOCK(); for (hid = 0; hid < crypto_drivers_size; hid++) { const struct cryptocap *cap = crypto_drivers[hid]; if (cap == NULL || ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && !crypto_devallowsoft)) { continue; } for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED) feat |= 1 << kalg; } CRYPTO_DRIVER_UNLOCK(); *featp = feat; return (0); } /* * Terminate a thread at module unload. The process that * initiated this is waiting for us to signal that we're gone; * wake it up and exit. We use the driver table lock to insure * we don't do the wakeup before they're waiting. There is no * race here because the waiter sleeps on the proc lock for the * thread so it gets notified at the right time because of an * extra wakeup that's done in exit1(). */ static void crypto_finis(void *chan) { CRYPTO_DRIVER_LOCK(); wakeup_one(chan); CRYPTO_DRIVER_UNLOCK(); kproc_exit(0); } /* * Crypto thread, dispatches crypto requests. */ static void crypto_proc(void) { struct cryptop *crp, *submit; struct cryptkop *krp; struct cryptocap *cap; int result, hint; #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) fpu_kern_thread(FPU_KERN_NORMAL); #endif CRYPTO_Q_LOCK(); for (;;) { /* * Find the first element in the queue that can be * processed and look-ahead to see if multiple ops * are ready for the same driver. */ submit = NULL; hint = 0; TAILQ_FOREACH(crp, &crp_q, crp_next) { cap = crp->crp_session->cap; /* * Driver cannot disappeared when there is an active * session. */ KASSERT(cap != NULL, ("%s:%u Driver disappeared.", __func__, __LINE__)); if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { /* Op needs to be migrated, process it. */ if (submit == NULL) submit = crp; break; } if (!cap->cc_qblocked) { if (submit != NULL) { /* * We stop on finding another op, * regardless whether its for the same * driver or not. We could keep * searching the queue but it might be * better to just use a per-driver * queue instead. */ if (submit->crp_session->cap == cap) hint = CRYPTO_HINT_MORE; break; } else { submit = crp; if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) break; /* keep scanning for more are q'd */ } } } if (submit != NULL) { TAILQ_REMOVE(&crp_q, submit, crp_next); cap = submit->crp_session->cap; KASSERT(cap != NULL, ("%s:%u Driver disappeared.", __func__, __LINE__)); CRYPTO_Q_UNLOCK(); result = crypto_invoke(cap, submit, hint); CRYPTO_Q_LOCK(); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptop's and put * the request back in the queue. It would * best to put the request back where we got * it but that's hard so for now we put it * at the front. This should be ok; putting * it at the end does not work. 
*/ cap->cc_qblocked = 1; TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); CRYPTOSTAT_INC(cs_blocks); } } /* As above, but for key ops */ TAILQ_FOREACH(krp, &crp_kq, krp_next) { cap = krp->krp_cap; if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { /* * Operation needs to be migrated, * clear krp_cap so a new driver is * selected. */ krp->krp_cap = NULL; cap_rele(cap); break; } if (!cap->cc_kqblocked) break; } if (krp != NULL) { TAILQ_REMOVE(&crp_kq, krp, krp_next); CRYPTO_Q_UNLOCK(); result = crypto_kinvoke(krp); CRYPTO_Q_LOCK(); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptkop's and put * the request back in the queue. It would * best to put the request back where we got * it but that's hard so for now we put it * at the front. This should be ok; putting * it at the end does not work. */ krp->krp_cap->cc_kqblocked = 1; TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); CRYPTOSTAT_INC(cs_kblocks); } } if (submit == NULL && krp == NULL) { /* * Nothing more to be processed. Sleep until we're * woken because there are more ops to process. * This happens either by submission or by a driver * becoming unblocked and notifying us through * crypto_unblock. Note that when we wakeup we * start processing each queue again from the * front. It's not clear that it's important to * preserve this ordering since ops may finish * out of order if dispatched to different devices * and some become blocked while others do not. */ crp_sleep = 1; msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); crp_sleep = 0; if (cryptoproc == NULL) break; CRYPTOSTAT_INC(cs_intrs); } } CRYPTO_Q_UNLOCK(); crypto_finis(&crp_q); } /* * Crypto returns thread, does callbacks for processed crypto requests. * Callbacks are done here, rather than in the crypto drivers, because * callbacks typically are expensive and would slow interrupt handling. */ static void crypto_ret_proc(struct crypto_ret_worker *ret_worker) { struct cryptop *crpt; struct cryptkop *krpt; CRYPTO_RETW_LOCK(ret_worker); for (;;) { /* Harvest return q's for completed ops */ crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q); if (crpt != NULL) { if (crpt->crp_seq == ret_worker->reorder_cur_seq) { TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next); ret_worker->reorder_cur_seq++; } else { crpt = NULL; } } if (crpt == NULL) { crpt = TAILQ_FIRST(&ret_worker->crp_ret_q); if (crpt != NULL) TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next); } krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq); if (krpt != NULL) TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next); if (crpt != NULL || krpt != NULL) { CRYPTO_RETW_UNLOCK(ret_worker); /* * Run callbacks unlocked. */ if (crpt != NULL) crpt->crp_callback(crpt); if (krpt != NULL) krpt->krp_callback(krpt); CRYPTO_RETW_LOCK(ret_worker); } else { /* * Nothing more to be processed. Sleep until we're * woken because there are more returns to process. 
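 * The wait channel is crp_ret_q, the same channel on which
 * crypto_done() and crypto_kdone() perform their wakeup_one() calls.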
*/ msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT, "crypto_ret_wait", 0); if (ret_worker->cryptoretproc == NULL) break; CRYPTOSTAT_INC(cs_rets); } } CRYPTO_RETW_UNLOCK(ret_worker); crypto_finis(&ret_worker->crp_ret_q); } #ifdef DDB static void db_show_drivers(void) { int hid; db_printf("%12s %4s %4s %8s %2s %2s\n" , "Device" , "Ses" , "Kops" , "Flags" , "QB" , "KB" ); for (hid = 0; hid < crypto_drivers_size; hid++) { const struct cryptocap *cap = crypto_drivers[hid]; if (cap == NULL) continue; db_printf("%-12s %4u %4u %08x %2u %2u\n" , device_get_nameunit(cap->cc_dev) , cap->cc_sessions , cap->cc_koperations , cap->cc_flags , cap->cc_qblocked , cap->cc_kqblocked ); } } DB_SHOW_COMMAND(crypto, db_show_crypto) { struct cryptop *crp; struct crypto_ret_worker *ret_worker; db_show_drivers(); db_printf("\n"); db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", "Device", "Callback"); TAILQ_FOREACH(crp, &crp_q, crp_next) { db_printf("%4u %08x %4u %4u %04x %8p %8p\n" , crp->crp_session->cap->cc_hid , (int) crypto_ses2caps(crp->crp_session) , crp->crp_olen , crp->crp_etype , crp->crp_flags , device_get_nameunit(crp->crp_session->cap->cc_dev) , crp->crp_callback ); } FOREACH_CRYPTO_RETW(ret_worker) { db_printf("\n%8s %4s %4s %4s %8s\n", "ret_worker", "HID", "Etype", "Flags", "Callback"); if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { db_printf("%8td %4u %4u %04x %8p\n" , CRYPTO_RETW_ID(ret_worker) , crp->crp_session->cap->cc_hid , crp->crp_etype , crp->crp_flags , crp->crp_callback ); } } } } DB_SHOW_COMMAND(kcrypto, db_show_kcrypto) { struct cryptkop *krp; struct crypto_ret_worker *ret_worker; db_show_drivers(); db_printf("\n"); db_printf("%4s %5s %4s %4s %8s %4s %8s\n", "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); TAILQ_FOREACH(krp, &crp_kq, krp_next) { db_printf("%4u %5u %4u %4u %08x %4u %8p\n" , krp->krp_op , krp->krp_status , krp->krp_iparams, krp->krp_oparams , krp->krp_crid, krp->krp_hid , krp->krp_callback ); } ret_worker = CRYPTO_RETW(0); if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { db_printf("%4s %5s %8s %4s %8s\n", "Op", "Status", "CRID", "HID", "Callback"); TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) { db_printf("%4u %5u %08x %4u %8p\n" , krp->krp_op , krp->krp_status , krp->krp_crid, krp->krp_hid , krp->krp_callback ); } } } #endif int crypto_modevent(module_t mod, int type, void *unused); /* * Initialization code, both for static and dynamic loading. * Note this is not invoked with the usual MODULE_DECLARE * mechanism but instead is listed as a dependency by the * cryptosoft driver. This guarantees proper ordering of * calls on module load/unload. */ int crypto_modevent(module_t mod, int type, void *unused) { int error = EINVAL; switch (type) { case MOD_LOAD: error = crypto_init(); if (error == 0 && bootverbose) printf("crypto: \n"); break; case MOD_UNLOAD: /*XXX disallow if active sessions */ error = 0; crypto_destroy(); return 0; } return error; } MODULE_VERSION(crypto, 1); MODULE_DEPEND(crypto, zlib, 1, 1, 1); Index: head/sys/opencrypto/cryptodev.h =================================================================== --- head/sys/opencrypto/cryptodev.h (revision 364798) +++ head/sys/opencrypto/cryptodev.h (revision 364799) @@ -1,692 +1,734 @@ /* $FreeBSD$ */ /* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */ /*- * The author of this code is Angelos D. 
Keromytis (angelos@cis.upenn.edu) * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000 Angelos D. Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. * * Copyright (c) 2001 Theo de Raadt * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
* */ #ifndef _CRYPTO_CRYPTO_H_ #define _CRYPTO_CRYPTO_H_ #include #ifdef _KERNEL #include #include #endif /* Some initial values */ #define CRYPTO_DRIVERS_INITIAL 4 /* Hash values */ #define NULL_HASH_LEN 16 #define SHA1_HASH_LEN 20 #define RIPEMD160_HASH_LEN 20 #define SHA2_224_HASH_LEN 28 #define SHA2_256_HASH_LEN 32 #define SHA2_384_HASH_LEN 48 #define SHA2_512_HASH_LEN 64 #define AES_GMAC_HASH_LEN 16 #define POLY1305_HASH_LEN 16 #define AES_CBC_MAC_HASH_LEN 16 /* Maximum hash algorithm result length */ #define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */ #define SHA1_BLOCK_LEN 64 #define RIPEMD160_BLOCK_LEN 64 #define SHA2_224_BLOCK_LEN 64 #define SHA2_256_BLOCK_LEN 64 #define SHA2_384_BLOCK_LEN 128 #define SHA2_512_BLOCK_LEN 128 /* HMAC values */ #define NULL_HMAC_BLOCK_LEN 64 /* Maximum HMAC block length */ #define HMAC_MAX_BLOCK_LEN SHA2_512_BLOCK_LEN /* Keep this updated */ #define HMAC_IPAD_VAL 0x36 #define HMAC_OPAD_VAL 0x5C /* HMAC Key Length */ #define AES_128_GMAC_KEY_LEN 16 #define AES_192_GMAC_KEY_LEN 24 #define AES_256_GMAC_KEY_LEN 32 #define AES_128_CBC_MAC_KEY_LEN 16 #define AES_192_CBC_MAC_KEY_LEN 24 #define AES_256_CBC_MAC_KEY_LEN 32 #define POLY1305_KEY_LEN 32 /* Encryption algorithm block sizes */ #define NULL_BLOCK_LEN 4 /* IPsec to maintain alignment */ #define RIJNDAEL128_BLOCK_LEN 16 #define AES_BLOCK_LEN 16 #define AES_ICM_BLOCK_LEN 1 #define CAMELLIA_BLOCK_LEN 16 #define CHACHA20_NATIVE_BLOCK_LEN 64 #define EALG_MAX_BLOCK_LEN CHACHA20_NATIVE_BLOCK_LEN /* Keep this updated */ /* IV Lengths */ #define AES_GCM_IV_LEN 12 #define AES_CCM_IV_LEN 12 #define AES_XTS_IV_LEN 8 #define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */ /* Min and Max Encryption Key Sizes */ #define NULL_MIN_KEY 0 #define NULL_MAX_KEY 256 /* 2048 bits, max key */ #define RIJNDAEL_MIN_KEY 16 #define RIJNDAEL_MAX_KEY 32 #define AES_MIN_KEY RIJNDAEL_MIN_KEY #define AES_MAX_KEY RIJNDAEL_MAX_KEY #define AES_XTS_MIN_KEY (2 * AES_MIN_KEY) #define AES_XTS_MAX_KEY (2 * AES_MAX_KEY) #define CAMELLIA_MIN_KEY 16 #define CAMELLIA_MAX_KEY 32 /* Maximum hash algorithm result length */ #define AALG_MAX_RESULT_LEN 64 /* Keep this updated */ #define CRYPTO_ALGORITHM_MIN 1 #define CRYPTO_DES_CBC 1 #define CRYPTO_3DES_CBC 2 #define CRYPTO_BLF_CBC 3 #define CRYPTO_CAST_CBC 4 #define CRYPTO_SKIPJACK_CBC 5 #define CRYPTO_MD5_HMAC 6 #define CRYPTO_SHA1_HMAC 7 #define CRYPTO_RIPEMD160_HMAC 8 #define CRYPTO_MD5_KPDK 9 #define CRYPTO_SHA1_KPDK 10 #define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */ #define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */ #define CRYPTO_ARC4 12 #define CRYPTO_MD5 13 #define CRYPTO_SHA1 14 #define CRYPTO_NULL_HMAC 15 #define CRYPTO_NULL_CBC 16 #define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */ #define CRYPTO_SHA2_256_HMAC 18 #define CRYPTO_SHA2_384_HMAC 19 #define CRYPTO_SHA2_512_HMAC 20 #define CRYPTO_CAMELLIA_CBC 21 #define CRYPTO_AES_XTS 22 #define CRYPTO_AES_ICM 23 /* commonly known as CTR mode */ #define CRYPTO_AES_NIST_GMAC 24 /* GMAC only */ #define CRYPTO_AES_NIST_GCM_16 25 /* 16 byte ICV */ #ifdef _KERNEL #define CRYPTO_AES_128_NIST_GMAC 26 /* auth side */ #define CRYPTO_AES_192_NIST_GMAC 27 /* auth side */ #define CRYPTO_AES_256_NIST_GMAC 28 /* auth side */ #endif #define CRYPTO_BLAKE2B 29 /* Blake2b hash */ #define CRYPTO_BLAKE2S 30 /* Blake2s hash */ #define CRYPTO_CHACHA20 31 /* Chacha20 stream cipher */ #define CRYPTO_SHA2_224_HMAC 32 #define CRYPTO_RIPEMD160 33 #define CRYPTO_SHA2_224 34 #define CRYPTO_SHA2_256 35 #define 
CRYPTO_SHA2_384 36 #define CRYPTO_SHA2_512 37 #define CRYPTO_POLY1305 38 #define CRYPTO_AES_CCM_CBC_MAC 39 /* auth side */ #define CRYPTO_AES_CCM_16 40 /* cipher side */ #define CRYPTO_ALGORITHM_MAX 40 /* Keep updated - see below */ #define CRYPTO_ALGO_VALID(x) ((x) >= CRYPTO_ALGORITHM_MIN && \ (x) <= CRYPTO_ALGORITHM_MAX) /* Algorithm flags */ #define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */ #define CRYPTO_ALG_FLAG_RNG_ENABLE 0x02 /* Has HW RNG for DH/DSA */ #define CRYPTO_ALG_FLAG_DSA_SHA 0x04 /* Can do SHA on msg */ /* * Crypto driver/device flags. They can set in the crid * parameter when creating a session or submitting a key * op to affect the device/driver assigned. If neither * of these are specified then the crid is assumed to hold * the driver id of an existing (and suitable) device that * must be used to satisfy the request. */ #define CRYPTO_FLAG_HARDWARE 0x01000000 /* hardware accelerated */ #define CRYPTO_FLAG_SOFTWARE 0x02000000 /* software implementation */ +/* Does the kernel support vmpage buffers on this platform? */ +#ifdef __powerpc__ +#define CRYPTO_MAY_HAVE_VMPAGE 1 +#else +#define CRYPTO_MAY_HAVE_VMPAGE ( PMAP_HAS_DMAP ) +#endif +/* Does the currently running system support vmpage buffers on this platform? */ +#define CRYPTO_HAS_VMPAGE ( PMAP_HAS_DMAP ) + /* NB: deprecated */ struct session_op { u_int32_t cipher; /* ie. CRYPTO_AES_CBC */ u_int32_t mac; /* ie. CRYPTO_SHA2_256_HMAC */ u_int32_t keylen; /* cipher key */ c_caddr_t key; int mackeylen; /* mac key */ c_caddr_t mackey; u_int32_t ses; /* returns: session # */ }; /* * session and crypt _op structs are used by userspace programs to interact * with /dev/crypto. Confusingly, the internal kernel interface is named * "cryptop" (no underscore). */ struct session2_op { u_int32_t cipher; /* ie. CRYPTO_AES_CBC */ u_int32_t mac; /* ie. CRYPTO_SHA2_256_HMAC */ u_int32_t keylen; /* cipher key */ c_caddr_t key; int mackeylen; /* mac key */ c_caddr_t mackey; u_int32_t ses; /* returns: session # */ int crid; /* driver id + flags (rw) */ int pad[4]; /* for future expansion */ }; struct crypt_op { u_int32_t ses; u_int16_t op; /* i.e. COP_ENCRYPT */ #define COP_ENCRYPT 1 #define COP_DECRYPT 2 u_int16_t flags; #define COP_F_CIPHER_FIRST 0x0001 /* Cipher before MAC. */ #define COP_F_BATCH 0x0008 /* Batch op if possible */ u_int len; c_caddr_t src; /* become iov[] inside kernel */ caddr_t dst; caddr_t mac; /* must be big enough for chosen MAC */ c_caddr_t iv; }; /* op and flags the same as crypt_op */ struct crypt_aead { u_int32_t ses; u_int16_t op; /* i.e. COP_ENCRYPT */ u_int16_t flags; u_int len; u_int aadlen; u_int ivlen; c_caddr_t src; /* become iov[] inside kernel */ caddr_t dst; c_caddr_t aad; /* additional authenticated data */ caddr_t tag; /* must fit for chosen TAG length */ c_caddr_t iv; }; /* * Parameters for looking up a crypto driver/device by * device name or by id. The latter are returned for * created sessions (crid) and completed key operations. */ struct crypt_find_op { int crid; /* driver id + flags */ char name[32]; /* device/driver name */ }; /* bignum parameter, in packed bytes, ... */ struct crparam { caddr_t crp_p; u_int crp_nbits; }; #define CRK_MAXPARAM 8 struct crypt_kop { u_int crk_op; /* ie. 
CRK_MOD_EXP or other */ u_int crk_status; /* return status */ u_short crk_iparams; /* # of input parameters */ u_short crk_oparams; /* # of output parameters */ u_int crk_crid; /* NB: only used by CIOCKEY2 (rw) */ struct crparam crk_param[CRK_MAXPARAM]; }; #define CRK_ALGORITM_MIN 0 #define CRK_MOD_EXP 0 #define CRK_MOD_EXP_CRT 1 #define CRK_DSA_SIGN 2 #define CRK_DSA_VERIFY 3 #define CRK_DH_COMPUTE_KEY 4 #define CRK_ALGORITHM_MAX 4 /* Keep updated - see below */ #define CRF_MOD_EXP (1 << CRK_MOD_EXP) #define CRF_MOD_EXP_CRT (1 << CRK_MOD_EXP_CRT) #define CRF_DSA_SIGN (1 << CRK_DSA_SIGN) #define CRF_DSA_VERIFY (1 << CRK_DSA_VERIFY) #define CRF_DH_COMPUTE_KEY (1 << CRK_DH_COMPUTE_KEY) /* * done against open of /dev/crypto, to get a cloned descriptor. * Please use F_SETFD against the cloned descriptor. */ #define CRIOGET _IOWR('c', 100, u_int32_t) #define CRIOASYMFEAT CIOCASYMFEAT #define CRIOFINDDEV CIOCFINDDEV /* the following are done against the cloned descriptor */ #define CIOCGSESSION _IOWR('c', 101, struct session_op) #define CIOCFSESSION _IOW('c', 102, u_int32_t) #define CIOCCRYPT _IOWR('c', 103, struct crypt_op) #define CIOCKEY _IOWR('c', 104, struct crypt_kop) #define CIOCASYMFEAT _IOR('c', 105, u_int32_t) #define CIOCGSESSION2 _IOWR('c', 106, struct session2_op) #define CIOCKEY2 _IOWR('c', 107, struct crypt_kop) #define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op) #define CIOCCRYPTAEAD _IOWR('c', 109, struct crypt_aead) struct cryptostats { uint64_t cs_ops; /* symmetric crypto ops submitted */ uint64_t cs_errs; /* symmetric crypto ops that failed */ uint64_t cs_kops; /* asymetric/key ops submitted */ uint64_t cs_kerrs; /* asymetric/key ops that failed */ uint64_t cs_intrs; /* crypto swi thread activations */ uint64_t cs_rets; /* crypto return thread activations */ uint64_t cs_blocks; /* symmetric op driver block */ uint64_t cs_kblocks; /* symmetric op driver block */ }; #ifdef _KERNEL /* * Return values for cryptodev_probesession methods. */ #define CRYPTODEV_PROBE_HARDWARE (-100) #define CRYPTODEV_PROBE_ACCEL_SOFTWARE (-200) #define CRYPTODEV_PROBE_SOFTWARE (-500) #if 0 #define CRYPTDEB(s, ...) do { \ printf("%s:%d: " s "\n", __FILE__, __LINE__, ## __VA_ARGS__); \ } while (0) #else #define CRYPTDEB(...) do { } while (0) #endif struct crypto_session_params { int csp_mode; /* Type of operations to perform. */ #define CSP_MODE_NONE 0 #define CSP_MODE_COMPRESS 1 /* Compression/decompression. */ #define CSP_MODE_CIPHER 2 /* Encrypt/decrypt. */ #define CSP_MODE_DIGEST 3 /* Compute/verify digest. */ #define CSP_MODE_AEAD 4 /* Combined auth/encryption. */ #define CSP_MODE_ETA 5 /* IPsec style encrypt-then-auth */ int csp_flags; #define CSP_F_SEPARATE_OUTPUT 0x0001 /* Requests can use separate output */ #define CSP_F_SEPARATE_AAD 0x0002 /* Requests can use separate AAD */ int csp_ivlen; /* IV length in bytes. */ int csp_cipher_alg; int csp_cipher_klen; /* Key length in bytes. */ const void *csp_cipher_key; int csp_auth_alg; int csp_auth_klen; /* Key length in bytes. */ const void *csp_auth_key; int csp_auth_mlen; /* Number of digest bytes to use. 0 means all. */ }; enum crypto_buffer_type { CRYPTO_BUF_NONE = 0, CRYPTO_BUF_CONTIG, CRYPTO_BUF_UIO, CRYPTO_BUF_MBUF, - CRYPTO_BUF_LAST = CRYPTO_BUF_MBUF + CRYPTO_BUF_VMPAGE, + CRYPTO_BUF_LAST = CRYPTO_BUF_VMPAGE }; /* * Description of a data buffer for a request. Requests can either * have a single buffer that is modified in place or separate input * and output buffers. 
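 *
 * With CRYPTO_BUF_VMPAGE a buffer is instead described by an array of
 * vm_page_t pointers (cb_vm_page), the offset of the first valid byte
 * within the first page (cb_vm_page_offset) and the total number of
 * valid bytes (cb_vm_page_len).  This type is only usable when
 * CRYPTO_HAS_VMPAGE is true, i.e. when a direct map is available.
 *
 * As a minimal sketch (the names cses, pages, len, offset and my_done
 * are hypothetical, and session setup, IV and error handling are
 * omitted), a caller could attach such a buffer to a request with:
 *
 *	crp = crypto_getreq(cses, M_WAITOK);
 *	crypto_use_vmpage(crp, pages, len, offset);
 *	crp->crp_payload_start = 0;
 *	crp->crp_payload_length = len;
 *	crp->crp_op = CRYPTO_OP_ENCRYPT;
 *	crp->crp_callback = my_done;
 *	error = crypto_dispatch(crp);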
*/ struct crypto_buffer { union { struct { char *cb_buf; int cb_buf_len; }; struct mbuf *cb_mbuf; + struct { + vm_page_t *cb_vm_page; + int cb_vm_page_len; + int cb_vm_page_offset; + }; struct uio *cb_uio; }; enum crypto_buffer_type cb_type; }; /* * A cursor is used to iterate through a crypto request data buffer. */ struct crypto_buffer_cursor { union { char *cc_buf; struct mbuf *cc_mbuf; struct iovec *cc_iov; + vm_page_t *cc_vmpage; }; - union { - int cc_buf_len; - size_t cc_offset; - }; + /* Optional bytes of valid data remaining */ + int cc_buf_len; + /* + * Optional offset within the current buffer segment where + * valid data begins + */ + size_t cc_offset; enum crypto_buffer_type cc_type; }; /* Structure describing complete operation */ struct cryptop { TAILQ_ENTRY(cryptop) crp_next; struct task crp_task; crypto_session_t crp_session; /* Session */ int crp_olen; /* Result total length */ int crp_etype; /* * Error type (zero means no error). * All error codes except EAGAIN * indicate possible data corruption (as in, * the data have been touched). On all * errors, the crp_session may have changed * (reset to a new one), so the caller * should always check and use the new * value on future requests. */ int crp_flags; #define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */ #define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */ #define CRYPTO_F_DONE 0x0020 /* Operation completed */ #define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */ #define CRYPTO_F_ASYNC 0x0080 /* Dispatch crypto jobs on several threads * if op is synchronous */ #define CRYPTO_F_ASYNC_KEEPORDER 0x0100 /* * Dispatch the crypto jobs in the same * order there are submitted. Applied only * if CRYPTO_F_ASYNC flags is set */ #define CRYPTO_F_IV_SEPARATE 0x0200 /* Use crp_iv[] as IV. */ int crp_op; struct crypto_buffer crp_buf; struct crypto_buffer crp_obuf; void *crp_aad; /* AAD buffer. */ int crp_aad_start; /* Location of AAD. */ int crp_aad_length; /* 0 => no AAD. */ int crp_iv_start; /* Location of IV. IV length is from * the session. */ int crp_payload_start; /* Location of ciphertext. */ int crp_payload_output_start; int crp_payload_length; int crp_digest_start; /* Location of MAC/tag. Length is * from the session. */ uint8_t crp_iv[EALG_MAX_BLOCK_LEN]; /* IV if IV_SEPARATE. */ const void *crp_cipher_key; /* New cipher key if non-NULL. */ const void *crp_auth_key; /* New auth key if non-NULL. 
*/ void *crp_opaque; /* Opaque pointer, passed along */ int (*crp_callback)(struct cryptop *); /* Callback function */ struct bintime crp_tstamp; /* performance time stamp */ uint32_t crp_seq; /* used for ordered dispatch */ uint32_t crp_retw_id; /* * the return worker to be used, * used for ordered dispatch */ }; static __inline void _crypto_use_buf(struct crypto_buffer *cb, void *buf, int len) { cb->cb_buf = buf; cb->cb_buf_len = len; cb->cb_type = CRYPTO_BUF_CONTIG; } static __inline void _crypto_use_mbuf(struct crypto_buffer *cb, struct mbuf *m) { cb->cb_mbuf = m; cb->cb_type = CRYPTO_BUF_MBUF; } static __inline void +_crypto_use_vmpage(struct crypto_buffer *cb, vm_page_t *pages, int len, + int offset) +{ + cb->cb_vm_page = pages; + cb->cb_vm_page_len = len; + cb->cb_vm_page_offset = offset; + cb->cb_type = CRYPTO_BUF_VMPAGE; +} + +static __inline void _crypto_use_uio(struct crypto_buffer *cb, struct uio *uio) { cb->cb_uio = uio; cb->cb_type = CRYPTO_BUF_UIO; } static __inline void crypto_use_buf(struct cryptop *crp, void *buf, int len) { _crypto_use_buf(&crp->crp_buf, buf, len); } static __inline void crypto_use_mbuf(struct cryptop *crp, struct mbuf *m) { _crypto_use_mbuf(&crp->crp_buf, m); } static __inline void +crypto_use_vmpage(struct cryptop *crp, vm_page_t *pages, int len, int offset) +{ + _crypto_use_vmpage(&crp->crp_buf, pages, len, offset); +} + +static __inline void crypto_use_uio(struct cryptop *crp, struct uio *uio) { _crypto_use_uio(&crp->crp_buf, uio); } static __inline void crypto_use_output_buf(struct cryptop *crp, void *buf, int len) { _crypto_use_buf(&crp->crp_obuf, buf, len); } static __inline void crypto_use_output_mbuf(struct cryptop *crp, struct mbuf *m) { _crypto_use_mbuf(&crp->crp_obuf, m); +} + +static __inline void +crypto_use_output_vmpage(struct cryptop *crp, vm_page_t *pages, int len, + int offset) +{ + _crypto_use_vmpage(&crp->crp_obuf, pages, len, offset); } static __inline void crypto_use_output_uio(struct cryptop *crp, struct uio *uio) { _crypto_use_uio(&crp->crp_obuf, uio); } #define CRYPTOP_ASYNC(crp) \ (((crp)->crp_flags & CRYPTO_F_ASYNC) && \ crypto_ses2caps((crp)->crp_session) & CRYPTOCAP_F_SYNC) #define CRYPTOP_ASYNC_KEEPORDER(crp) \ (CRYPTOP_ASYNC(crp) && \ (crp)->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) #define CRYPTO_HAS_OUTPUT_BUFFER(crp) \ ((crp)->crp_obuf.cb_type != CRYPTO_BUF_NONE) /* Flags in crp_op. */ #define CRYPTO_OP_DECRYPT 0x0 #define CRYPTO_OP_ENCRYPT 0x1 #define CRYPTO_OP_IS_ENCRYPT(op) ((op) & CRYPTO_OP_ENCRYPT) #define CRYPTO_OP_COMPUTE_DIGEST 0x0 #define CRYPTO_OP_VERIFY_DIGEST 0x2 #define CRYPTO_OP_DECOMPRESS CRYPTO_OP_DECRYPT #define CRYPTO_OP_COMPRESS CRYPTO_OP_ENCRYPT #define CRYPTO_OP_IS_COMPRESS(op) ((op) & CRYPTO_OP_COMPRESS) /* * Hints passed to process methods. */ #define CRYPTO_HINT_MORE 0x1 /* more ops coming shortly */ struct cryptkop { TAILQ_ENTRY(cryptkop) krp_next; u_int krp_op; /* ie. CRK_MOD_EXP or other */ u_int krp_status; /* return status */ u_short krp_iparams; /* # of input parameters */ u_short krp_oparams; /* # of output parameters */ u_int krp_crid; /* desired device, etc. 
*/ uint32_t krp_hid; /* device used */ struct crparam krp_param[CRK_MAXPARAM]; /* kvm */ void (*krp_callback)(struct cryptkop *); struct cryptocap *krp_cap; }; uint32_t crypto_ses2hid(crypto_session_t crypto_session); uint32_t crypto_ses2caps(crypto_session_t crypto_session); void *crypto_get_driver_session(crypto_session_t crypto_session); const struct crypto_session_params *crypto_get_params( crypto_session_t crypto_session); struct auth_hash *crypto_auth_hash(const struct crypto_session_params *csp); struct enc_xform *crypto_cipher(const struct crypto_session_params *csp); MALLOC_DECLARE(M_CRYPTO_DATA); extern int crypto_newsession(crypto_session_t *cses, const struct crypto_session_params *params, int hard); extern void crypto_freesession(crypto_session_t cses); #define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE #define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE #define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */ #define CRYPTOCAP_F_ACCEL_SOFTWARE 0x08000000 extern int32_t crypto_get_driverid(device_t dev, size_t session_size, int flags); extern int crypto_find_driver(const char *); extern device_t crypto_find_device_byhid(int hid); extern int crypto_getcaps(int hid); extern int crypto_kregister(u_int32_t, int, u_int32_t); extern int crypto_unregister_all(u_int32_t driverid); extern int crypto_dispatch(struct cryptop *crp); extern int crypto_kdispatch(struct cryptkop *); #define CRYPTO_SYMQ 0x1 #define CRYPTO_ASYMQ 0x2 extern int crypto_unblock(u_int32_t, int); extern void crypto_done(struct cryptop *crp); extern void crypto_kdone(struct cryptkop *); extern int crypto_getfeat(int *); extern void crypto_destroyreq(struct cryptop *crp); extern void crypto_initreq(struct cryptop *crp, crypto_session_t cses); extern void crypto_freereq(struct cryptop *crp); extern struct cryptop *crypto_getreq(crypto_session_t cses, int how); extern int crypto_usercrypto; /* userland may do crypto requests */ extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */ extern int crypto_devallowsoft; /* only use hardware crypto */ #ifdef SYSCTL_DECL SYSCTL_DECL(_kern_crypto); #endif /* Helper routines for drivers to initialize auth contexts for HMAC. */ struct auth_hash; void hmac_init_ipad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx); void hmac_init_opad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx); /* * Crypto-related utility routines used mainly by drivers. * * Similar to m_copyback/data, *_copyback copy data from the 'src' * buffer into the crypto request's data buffer while *_copydata copy * data from the crypto request's data buffer into the the 'dst' * buffer. 
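 *
 * As a minimal sketch (local names are hypothetical), a driver can
 * walk a request's input payload one virtually-contiguous segment at
 * a time with a cursor:
 *
 *	struct crypto_buffer_cursor cc;
 *	size_t seglen, resid;
 *	const void *seg;
 *
 *	crypto_cursor_init(&cc, &crp->crp_buf);
 *	crypto_cursor_advance(&cc, crp->crp_payload_start);
 *	for (resid = crp->crp_payload_length; resid > 0; resid -= seglen) {
 *		seglen = MIN(crypto_cursor_seglen(&cc), resid);
 *		seg = crypto_cursor_segbase(&cc);
 *		... process 'seglen' bytes at 'seg' ...
 *		crypto_cursor_advance(&cc, seglen);
 *	}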
*/ void crypto_copyback(struct cryptop *crp, int off, int size, const void *src); void crypto_copydata(struct cryptop *crp, int off, int size, void *dst); int crypto_apply(struct cryptop *crp, int off, int len, int (*f)(void *, const void *, u_int), void *arg); void *crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len); int crypto_apply_buf(struct crypto_buffer *cb, int off, int len, int (*f)(void *, const void *, u_int), void *arg); void *crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb, size_t skip, size_t len); size_t crypto_buffer_len(struct crypto_buffer *cb); void crypto_cursor_init(struct crypto_buffer_cursor *cc, const struct crypto_buffer *cb); void crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount); void *crypto_cursor_segbase(struct crypto_buffer_cursor *cc); size_t crypto_cursor_seglen(struct crypto_buffer_cursor *cc); void crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size, const void *vsrc); void crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size, void *vdst); void crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size, void *vdst); static __inline void crypto_read_iv(struct cryptop *crp, void *iv) { const struct crypto_session_params *csp; csp = crypto_get_params(crp->crp_session); if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) memcpy(iv, crp->crp_iv, csp->csp_ivlen); else crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv); } #endif /* _KERNEL */ #endif /* _CRYPTO_CRYPTO_H_ */ Index: head/sys/opencrypto/cryptosoft.c =================================================================== --- head/sys/opencrypto/cryptosoft.c (revision 364798) +++ head/sys/opencrypto/cryptosoft.c (revision 364799) @@ -1,1528 +1,1532 @@ /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. Keromytis * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" struct swcr_auth { void *sw_ictx; void *sw_octx; struct auth_hash *sw_axf; uint16_t sw_mlen; }; struct swcr_encdec { void *sw_kschedule; struct enc_xform *sw_exf; }; struct swcr_compdec { struct comp_algo *sw_cxf; }; struct swcr_session { struct mtx swcr_lock; int (*swcr_process)(struct swcr_session *, struct cryptop *); struct swcr_auth swcr_auth; struct swcr_encdec swcr_encdec; struct swcr_compdec swcr_compdec; }; static int32_t swcr_id; static void swcr_freesession(device_t dev, crypto_session_t cses); /* Used for CRYPTO_NULL_CBC. */ static int swcr_null(struct swcr_session *ses, struct cryptop *crp) { return (0); } /* * Apply a symmetric encryption/decryption algorithm. */ static int swcr_encdec(struct swcr_session *ses, struct cryptop *crp) { unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN]; unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN]; const struct crypto_session_params *csp; struct swcr_encdec *sw; struct enc_xform *exf; int i, blks, inlen, ivlen, outlen, resid; struct crypto_buffer_cursor cc_in, cc_out; const unsigned char *inblk; unsigned char *outblk; int error; bool encrypting; error = 0; sw = &ses->swcr_encdec; exf = sw->sw_exf; ivlen = exf->ivsize; if (exf->native_blocksize == 0) { /* Check for non-padded data */ if ((crp->crp_payload_length % exf->blocksize) != 0) return (EINVAL); blks = exf->blocksize; } else blks = exf->native_blocksize; if (exf == &enc_xform_aes_icm && (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); if (crp->crp_cipher_key != NULL) { csp = crypto_get_params(crp->crp_session); error = exf->setkey(sw->sw_kschedule, crp->crp_cipher_key, csp->csp_cipher_klen); if (error) return (error); } crypto_read_iv(crp, iv); if (exf->reinit) { /* * xforms that provide a reinit method perform all IV * handling themselves. */ exf->reinit(sw->sw_kschedule, iv); } ivp = iv; crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); inlen = crypto_cursor_seglen(&cc_in); inblk = crypto_cursor_segbase(&cc_in); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; outlen = crypto_cursor_seglen(&cc_out); outblk = crypto_cursor_segbase(&cc_out); resid = crp->crp_payload_length; encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op); /* * Loop through encrypting blocks. 'inlen' is the remaining * length of the current segment in the input buffer. * 'outlen' is the remaining length of current segment in the * output buffer. */ while (resid >= blks) { /* * If the current block is not contained within the * current input/output segment, use 'blk' as a local * buffer. */ if (inlen < blks) { crypto_cursor_copydata(&cc_in, blks, blk); inblk = blk; } if (outlen < blks) outblk = blk; /* * Ciphers without a 'reinit' hook are assumed to be * used in CBC mode where the chaining is done here. 
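 * For encryption the plaintext block is XORed with the previous
 * ciphertext block (initially the IV) before being encrypted; for
 * decryption the incoming ciphertext block is saved first so that,
 * after decryption, the result can be XORed with the previous
 * ciphertext block even when operating in place.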
*/ if (exf->reinit != NULL) { if (encrypting) exf->encrypt(sw->sw_kschedule, inblk, outblk); else exf->decrypt(sw->sw_kschedule, inblk, outblk); } else if (encrypting) { /* XOR with previous block */ for (i = 0; i < blks; i++) outblk[i] = inblk[i] ^ ivp[i]; exf->encrypt(sw->sw_kschedule, outblk, outblk); /* * Keep encrypted block for XOR'ing * with next block */ memcpy(iv, outblk, blks); ivp = iv; } else { /* decrypt */ /* * Keep encrypted block for XOR'ing * with next block */ nivp = (ivp == iv) ? iv2 : iv; memcpy(nivp, inblk, blks); exf->decrypt(sw->sw_kschedule, inblk, outblk); /* XOR with previous block */ for (i = 0; i < blks; i++) outblk[i] ^= ivp[i]; ivp = nivp; } if (inlen < blks) { inlen = crypto_cursor_seglen(&cc_in); inblk = crypto_cursor_segbase(&cc_in); } else { crypto_cursor_advance(&cc_in, blks); inlen -= blks; inblk += blks; } if (outlen < blks) { crypto_cursor_copyback(&cc_out, blks, blk); outlen = crypto_cursor_seglen(&cc_out); outblk = crypto_cursor_segbase(&cc_out); } else { crypto_cursor_advance(&cc_out, blks); outlen -= blks; outblk += blks; } resid -= blks; } /* Handle trailing partial block for stream ciphers. */ if (resid > 0) { KASSERT(exf->native_blocksize != 0, ("%s: partial block of %d bytes for cipher %s", __func__, i, exf->name)); KASSERT(exf->reinit != NULL, ("%s: partial block cipher %s without reinit hook", __func__, exf->name)); KASSERT(resid < blks, ("%s: partial block too big", __func__)); inlen = crypto_cursor_seglen(&cc_in); outlen = crypto_cursor_seglen(&cc_out); if (inlen < resid) { crypto_cursor_copydata(&cc_in, resid, blk); inblk = blk; } else inblk = crypto_cursor_segbase(&cc_in); if (outlen < resid) outblk = blk; else outblk = crypto_cursor_segbase(&cc_out); if (encrypting) exf->encrypt_last(sw->sw_kschedule, inblk, outblk, resid); else exf->decrypt_last(sw->sw_kschedule, inblk, outblk, resid); if (outlen < resid) crypto_cursor_copyback(&cc_out, resid, blk); } explicit_bzero(blk, sizeof(blk)); explicit_bzero(iv, sizeof(iv)); explicit_bzero(iv2, sizeof(iv2)); return (0); } static void swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw, const uint8_t *key, int klen) { switch (axf->type) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: hmac_init_ipad(axf, key, klen, sw->sw_ictx); hmac_init_opad(axf, key, klen, sw->sw_octx); break; case CRYPTO_POLY1305: case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: axf->Setkey(sw->sw_ictx, key, klen); axf->Init(sw->sw_ictx); break; default: panic("%s: algorithm %d doesn't use keys", __func__, axf->type); } } /* * Compute or verify hash. 
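 * CRYPTO_OP_VERIFY_DIGEST in crp_op selects whether the computed
 * digest is written back into the request or compared (with
 * timingsafe_bcmp()) against the digest already present.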
*/ static int swcr_authcompute(struct swcr_session *ses, struct cryptop *crp) { u_char aalg[HASH_MAX_LEN]; const struct crypto_session_params *csp; struct swcr_auth *sw; struct auth_hash *axf; union authctx ctx; int err; sw = &ses->swcr_auth; axf = sw->sw_axf; if (crp->crp_auth_key != NULL) { csp = crypto_get_params(crp->crp_session); swcr_authprepare(axf, sw, crp->crp_auth_key, csp->csp_auth_klen); } bcopy(sw->sw_ictx, &ctx, axf->ctxsize); if (crp->crp_aad != NULL) err = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length); else err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, axf->Update, &ctx); if (err) return err; if (CRYPTO_HAS_OUTPUT_BUFFER(crp) && CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) err = crypto_apply_buf(&crp->crp_obuf, crp->crp_payload_output_start, crp->crp_payload_length, axf->Update, &ctx); else err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, axf->Update, &ctx); if (err) return err; switch (axf->type) { case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA2_384: case CRYPTO_SHA2_512: axf->Final(aalg, &ctx); break; case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_RIPEMD160_HMAC: if (sw->sw_octx == NULL) return EINVAL; axf->Final(aalg, &ctx); bcopy(sw->sw_octx, &ctx, axf->ctxsize); axf->Update(&ctx, aalg, axf->hashsize); axf->Final(aalg, &ctx); break; case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: case CRYPTO_NULL_HMAC: case CRYPTO_POLY1305: axf->Final(aalg, &ctx); break; } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { u_char uaalg[HASH_MAX_LEN]; crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg); if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0) err = EBADMSG; explicit_bzero(uaalg, sizeof(uaalg)); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg); } explicit_bzero(aalg, sizeof(aalg)); return (err); } CTASSERT(INT_MAX <= (1ll<<39) - 256); /* GCM: plain text < 2^39-256 */ CTASSERT(INT_MAX <= (uint64_t)-1); /* GCM: associated data <= 2^64-1 */ static int swcr_gmac(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char tag[GMAC_DIGEST_LEN]; u_char iv[AES_BLOCK_LEN]; struct crypto_buffer_cursor cc; const u_char *inblk; union authctx ctx; struct swcr_auth *swa; struct auth_hash *axf; uint32_t *blkp; int blksz, error, ivlen, len, resid; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = GMAC_BLOCK_LEN; KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch", __func__)); /* Initialize the IV */ ivlen = AES_GCM_IV_LEN; crypto_read_iv(crp, iv); axf->Reinit(&ctx, iv, ivlen); crypto_cursor_init(&cc, &crp->crp_buf); crypto_cursor_advance(&cc, crp->crp_payload_start); for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) { len = crypto_cursor_seglen(&cc); if (len >= blksz) { inblk = crypto_cursor_segbase(&cc); len = rounddown(MIN(len, resid), blksz); crypto_cursor_advance(&cc, len); } else { len = blksz; crypto_cursor_copydata(&cc, len, blk); inblk = blk; } axf->Update(&ctx, inblk, len); } if (resid > 0) { memset(blk, 0, blksz); crypto_cursor_copydata(&cc, resid, blk); axf->Update(&ctx, blk, blksz); } /* length block */ memset(blk, 0, blksz); blkp = (uint32_t *)blk + 1; *blkp = htobe32(crp->crp_payload_length * 8); axf->Update(&ctx, blk, blksz); /* Finalize MAC */ axf->Final(tag, &ctx); error = 0; if (crp->crp_op & 
CRYPTO_OP_VERIFY_DIGEST) { u_char tag2[GMAC_DIGEST_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0) error = EBADMSG; explicit_bzero(tag2, sizeof(tag2)); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } explicit_bzero(blkbuf, sizeof(blkbuf)); explicit_bzero(tag, sizeof(tag)); explicit_bzero(iv, sizeof(iv)); return (error); } static int swcr_gcm(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char tag[GMAC_DIGEST_LEN]; u_char iv[AES_BLOCK_LEN]; struct crypto_buffer_cursor cc_in, cc_out; const u_char *inblk; u_char *outblk; union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; struct auth_hash *axf; struct enc_xform *exf; uint32_t *blkp; int blksz, error, ivlen, len, r, resid; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = GMAC_BLOCK_LEN; KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch", __func__)); swe = &ses->swcr_encdec; exf = swe->sw_exf; KASSERT(axf->blocksize == exf->native_blocksize, ("%s: blocksize mismatch", __func__)); if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); /* Initialize the IV */ ivlen = AES_GCM_IV_LEN; bcopy(crp->crp_iv, iv, ivlen); /* Supply MAC with IV */ axf->Reinit(&ctx, iv, ivlen); /* Supply MAC with AAD */ if (crp->crp_aad != NULL) { len = rounddown(crp->crp_aad_length, blksz); if (len != 0) axf->Update(&ctx, crp->crp_aad, len); if (crp->crp_aad_length != len) { memset(blk, 0, blksz); memcpy(blk, (char *)crp->crp_aad + len, crp->crp_aad_length - len); axf->Update(&ctx, blk, blksz); } } else { crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_aad_start); for (resid = crp->crp_aad_length; resid >= blksz; resid -= len) { len = crypto_cursor_seglen(&cc_in); if (len >= blksz) { inblk = crypto_cursor_segbase(&cc_in); len = rounddown(MIN(len, resid), blksz); crypto_cursor_advance(&cc_in, len); } else { len = blksz; crypto_cursor_copydata(&cc_in, len, blk); inblk = blk; } axf->Update(&ctx, inblk, len); } if (resid > 0) { memset(blk, 0, blksz); crypto_cursor_copydata(&cc_in, resid, blk); axf->Update(&ctx, blk, blksz); } } exf->reinit(swe->sw_kschedule, iv); /* Do encryption with MAC */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) { if (crypto_cursor_seglen(&cc_in) < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else { inblk = crypto_cursor_segbase(&cc_in); crypto_cursor_advance(&cc_in, blksz); } if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { if (crypto_cursor_seglen(&cc_out) < blksz) outblk = blk; else outblk = crypto_cursor_segbase(&cc_out); exf->encrypt(swe->sw_kschedule, inblk, outblk); axf->Update(&ctx, outblk, blksz); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } else { axf->Update(&ctx, inblk, blksz); } } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { exf->encrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } axf->Update(&ctx, blk, resid); } /* length block */ 
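	/*
	 * The final GHASH input block holds len(AAD) and len(payload)
	 * as two 64-bit big-endian bit counts; the 32-bit words stored
	 * below land in the low half of each count.
	 */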
memset(blk, 0, blksz); blkp = (uint32_t *)blk + 1; *blkp = htobe32(crp->crp_aad_length * 8); blkp = (uint32_t *)blk + 3; *blkp = htobe32(crp->crp_payload_length * 8); axf->Update(&ctx, blk, blksz); /* Finalize MAC */ axf->Final(tag, &ctx); /* Validate tag */ error = 0; if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { u_char tag2[GMAC_DIGEST_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); r = timingsafe_bcmp(tag, tag2, swa->sw_mlen); explicit_bzero(tag2, sizeof(tag2)); if (r != 0) { error = EBADMSG; goto out; } /* tag matches, decrypt data */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); for (resid = crp->crp_payload_length; resid > blksz; resid -= blksz) { if (crypto_cursor_seglen(&cc_in) < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else { inblk = crypto_cursor_segbase(&cc_in); crypto_cursor_advance(&cc_in, blksz); } if (crypto_cursor_seglen(&cc_out) < blksz) outblk = blk; else outblk = crypto_cursor_segbase(&cc_out); exf->decrypt(swe->sw_kschedule, inblk, outblk); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); exf->decrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } out: explicit_bzero(blkbuf, sizeof(blkbuf)); explicit_bzero(tag, sizeof(tag)); explicit_bzero(iv, sizeof(iv)); return (error); } static int swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp) { u_char tag[AES_CBC_MAC_HASH_LEN]; u_char iv[AES_BLOCK_LEN]; union authctx ctx; struct swcr_auth *swa; struct auth_hash *axf; int error, ivlen; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); /* Initialize the IV */ ivlen = AES_CCM_IV_LEN; crypto_read_iv(crp, iv); /* * AES CCM-CBC-MAC needs to know the length of both the auth * data and payload data before doing the auth computation. 
*/ ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length; ctx.aes_cbc_mac_ctx.cryptDataLength = 0; axf->Reinit(&ctx, iv, ivlen); if (crp->crp_aad != NULL) error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length); else error = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, axf->Update, &ctx); if (error) return (error); /* Finalize MAC */ axf->Final(tag, &ctx); if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { u_char tag2[AES_CBC_MAC_HASH_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0) error = EBADMSG; explicit_bzero(tag2, sizeof(tag)); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } explicit_bzero(tag, sizeof(tag)); explicit_bzero(iv, sizeof(iv)); return (error); } static int swcr_ccm(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char tag[AES_CBC_MAC_HASH_LEN]; u_char iv[AES_BLOCK_LEN]; struct crypto_buffer_cursor cc_in, cc_out; const u_char *inblk; u_char *outblk; union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; struct auth_hash *axf; struct enc_xform *exf; int blksz, error, ivlen, r, resid; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = AES_BLOCK_LEN; KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch", __func__)); swe = &ses->swcr_encdec; exf = swe->sw_exf; KASSERT(axf->blocksize == exf->native_blocksize, ("%s: blocksize mismatch", __func__)); if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); /* Initialize the IV */ ivlen = AES_CCM_IV_LEN; bcopy(crp->crp_iv, iv, ivlen); /* * AES CCM-CBC-MAC needs to know the length of both the auth * data and payload data before doing the auth computation. */ ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length; ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length; /* Supply MAC with IV */ axf->Reinit(&ctx, iv, ivlen); /* Supply MAC with AAD */ if (crp->crp_aad != NULL) error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length); else error = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, axf->Update, &ctx); if (error) return (error); exf->reinit(swe->sw_kschedule, iv); /* Do encryption/decryption with MAC */ crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) { crypto_cursor_init(&cc_out, &crp->crp_obuf); crypto_cursor_advance(&cc_out, crp->crp_payload_output_start); } else cc_out = cc_in; for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) { if (crypto_cursor_seglen(&cc_in) < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else { inblk = crypto_cursor_segbase(&cc_in); crypto_cursor_advance(&cc_in, blksz); } if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { if (crypto_cursor_seglen(&cc_out) < blksz) outblk = blk; else outblk = crypto_cursor_segbase(&cc_out); axf->Update(&ctx, inblk, blksz); exf->encrypt(swe->sw_kschedule, inblk, outblk); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } else { /* * One of the problems with CCM+CBC is that * the authentication is done on the * unencrypted data. As a result, we have to * decrypt the data twice: once to generate * the tag and a second time after the tag is * verified. 
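 * The second pass is the "tag matches, decrypt data" loop in the
 * verification branch below, run only after timingsafe_bcmp()
 * accepts the tag.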
*/ exf->decrypt(swe->sw_kschedule, inblk, blk); axf->Update(&ctx, blk, blksz); } } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { axf->Update(&ctx, blk, resid); exf->encrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } else { exf->decrypt_last(swe->sw_kschedule, blk, blk, resid); axf->Update(&ctx, blk, resid); } } /* Finalize MAC */ axf->Final(tag, &ctx); /* Validate tag */ error = 0; if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { u_char tag2[AES_CBC_MAC_HASH_LEN]; crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2); r = timingsafe_bcmp(tag, tag2, swa->sw_mlen); explicit_bzero(tag2, sizeof(tag2)); if (r != 0) { error = EBADMSG; goto out; } /* tag matches, decrypt data */ exf->reinit(swe->sw_kschedule, iv); crypto_cursor_init(&cc_in, &crp->crp_buf); crypto_cursor_advance(&cc_in, crp->crp_payload_start); for (resid = crp->crp_payload_length; resid > blksz; resid -= blksz) { if (crypto_cursor_seglen(&cc_in) < blksz) { crypto_cursor_copydata(&cc_in, blksz, blk); inblk = blk; } else { inblk = crypto_cursor_segbase(&cc_in); crypto_cursor_advance(&cc_in, blksz); } if (crypto_cursor_seglen(&cc_out) < blksz) outblk = blk; else outblk = crypto_cursor_segbase(&cc_out); exf->decrypt(swe->sw_kschedule, inblk, outblk); if (outblk == blk) crypto_cursor_copyback(&cc_out, blksz, blk); else crypto_cursor_advance(&cc_out, blksz); } if (resid > 0) { crypto_cursor_copydata(&cc_in, resid, blk); exf->decrypt_last(swe->sw_kschedule, blk, blk, resid); crypto_cursor_copyback(&cc_out, resid, blk); } } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag); } out: explicit_bzero(blkbuf, sizeof(blkbuf)); explicit_bzero(tag, sizeof(tag)); explicit_bzero(iv, sizeof(iv)); return (error); } /* * Apply a cipher and a digest to perform EtA. */ static int swcr_eta(struct swcr_session *ses, struct cryptop *crp) { int error; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { error = swcr_encdec(ses, crp); if (error == 0) error = swcr_authcompute(ses, crp); } else { error = swcr_authcompute(ses, crp); if (error == 0) error = swcr_encdec(ses, crp); } return (error); } /* * Apply a compression/decompression algorithm */ static int swcr_compdec(struct swcr_session *ses, struct cryptop *crp) { u_int8_t *data, *out; struct comp_algo *cxf; int adj; u_int32_t result; cxf = ses->swcr_compdec.sw_cxf; /* We must handle the whole buffer of data in one time * then if there is not all the data in the mbuf, we must * copy in a buffer. */ data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT); if (data == NULL) return (EINVAL); crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, data); if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) result = cxf->compress(data, crp->crp_payload_length, &out); else result = cxf->decompress(data, crp->crp_payload_length, &out); free(data, M_CRYPTO_DATA); if (result == 0) return (EINVAL); crp->crp_olen = result; /* Check the compressed size when doing compression */ if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) { if (result >= crp->crp_payload_length) { /* Compression was useless, we lost time */ free(out, M_CRYPTO_DATA); return (0); } } /* Copy back the (de)compressed data. m_copyback is * extending the mbuf as necessary. 
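 * When the result is shorter than the original payload the buffer is
 * trimmed afterwards: m_adj() for mbufs, shrinking trailing iovecs
 * for uio buffers, and reducing cb_vm_page_len for vm_page buffers.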
*/ crypto_copyback(crp, crp->crp_payload_start, result, out); if (result < crp->crp_payload_length) { switch (crp->crp_buf.cb_type) { case CRYPTO_BUF_MBUF: adj = result - crp->crp_payload_length; m_adj(crp->crp_buf.cb_mbuf, adj); break; case CRYPTO_BUF_UIO: { struct uio *uio = crp->crp_buf.cb_uio; int ind; adj = crp->crp_payload_length - result; ind = uio->uio_iovcnt - 1; while (adj > 0 && ind >= 0) { if (adj < uio->uio_iov[ind].iov_len) { uio->uio_iov[ind].iov_len -= adj; break; } adj -= uio->uio_iov[ind].iov_len; uio->uio_iov[ind].iov_len = 0; ind--; uio->uio_iovcnt--; } } break; + case CRYPTO_BUF_VMPAGE: + adj = crp->crp_payload_length - result; + crp->crp_buf.cb_vm_page_len -= adj; + break; default: break; } } free(out, M_CRYPTO_DATA); return 0; } static int swcr_setup_cipher(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_encdec *swe; struct enc_xform *txf; int error; swe = &ses->swcr_encdec; txf = crypto_cipher(csp); MPASS(txf->ivsize == csp->csp_ivlen); if (txf->ctxsize != 0) { swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swe->sw_kschedule == NULL) return (ENOMEM); } if (csp->csp_cipher_key != NULL) { error = txf->setkey(swe->sw_kschedule, csp->csp_cipher_key, csp->csp_cipher_klen); if (error) return (error); } swe->sw_exf = txf; return (0); } static int swcr_setup_auth(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; struct auth_hash *axf; swa = &ses->swcr_auth; axf = crypto_auth_hash(csp); swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_octx == NULL) return (ENOBUFS); if (csp->csp_auth_key != NULL) { swcr_authprepare(axf, swa, csp->csp_auth_key, csp->csp_auth_klen); } if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA2_384: case CRYPTO_SHA2_512: axf->Init(swa->sw_ictx); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_AES_NIST_GMAC: axf->Init(swa->sw_ictx); axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_gmac; break; case CRYPTO_POLY1305: case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: /* * Blake2b and Blake2s support an optional key but do * not require one. 
*/ if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); axf->Init(swa->sw_ictx); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_AES_CCM_CBC_MAC: axf->Init(swa->sw_ictx); axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_ccm_cbc_mac; break; } return (0); } static int swcr_setup_gcm(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; struct auth_hash *axf; if (csp->csp_ivlen != AES_GCM_IV_LEN) return (EINVAL); /* First, setup the auth side. */ swa = &ses->swcr_auth; switch (csp->csp_cipher_klen * 8) { case 128: axf = &auth_hash_nist_gmac_aes_128; break; case 192: axf = &auth_hash_nist_gmac_aes_192; break; case 256: axf = &auth_hash_nist_gmac_aes_256; break; default: return (EINVAL); } swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); axf->Init(swa->sw_ictx); if (csp->csp_cipher_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_cipher_key, csp->csp_cipher_klen); /* Second, setup the cipher side. */ return (swcr_setup_cipher(ses, csp)); } static int swcr_setup_ccm(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; struct auth_hash *axf; if (csp->csp_ivlen != AES_CCM_IV_LEN) return (EINVAL); /* First, setup the auth side. */ swa = &ses->swcr_auth; switch (csp->csp_cipher_klen * 8) { case 128: axf = &auth_hash_ccm_cbc_mac_128; break; case 192: axf = &auth_hash_ccm_cbc_mac_192; break; case 256: axf = &auth_hash_ccm_cbc_mac_256; break; default: return (EINVAL); } swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); axf->Init(swa->sw_ictx); if (csp->csp_cipher_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_cipher_key, csp->csp_cipher_klen); /* Second, setup the cipher side. 
*/ return (swcr_setup_cipher(ses, csp)); } static bool swcr_auth_supported(const struct crypto_session_params *csp) { struct auth_hash *axf; axf = crypto_auth_hash(csp); if (axf == NULL) return (false); switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: break; case CRYPTO_AES_NIST_GMAC: switch (csp->csp_auth_klen * 8) { case 128: case 192: case 256: break; default: return (false); } if (csp->csp_auth_key == NULL) return (false); if (csp->csp_ivlen != AES_GCM_IV_LEN) return (false); break; case CRYPTO_POLY1305: if (csp->csp_auth_klen != POLY1305_KEY_LEN) return (false); break; case CRYPTO_AES_CCM_CBC_MAC: switch (csp->csp_auth_klen * 8) { case 128: case 192: case 256: break; default: return (false); } if (csp->csp_auth_key == NULL) return (false); if (csp->csp_ivlen != AES_CCM_IV_LEN) return (false); break; } return (true); } static bool swcr_cipher_supported(const struct crypto_session_params *csp) { struct enc_xform *txf; txf = crypto_cipher(csp); if (txf == NULL) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC && txf->ivsize != csp->csp_ivlen) return (false); return (true); } static int swcr_probesession(device_t dev, const struct crypto_session_params *csp) { if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: switch (csp->csp_cipher_alg) { case CRYPTO_DEFLATE_COMP: break; default: return (EINVAL); } break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: return (EINVAL); default: if (!swcr_cipher_supported(csp)) return (EINVAL); break; } break; case CSP_MODE_DIGEST: if (!swcr_auth_supported(csp)) return (EINVAL); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: break; default: return (EINVAL); } break; case CSP_MODE_ETA: /* AEAD algorithms cannot be used for EtA. */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: return (EINVAL); } switch (csp->csp_auth_alg) { case CRYPTO_AES_NIST_GMAC: case CRYPTO_AES_CCM_CBC_MAC: return (EINVAL); } if (!swcr_cipher_supported(csp) || !swcr_auth_supported(csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_SOFTWARE); } /* * Generate a new software session. 
*/ static int swcr_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct swcr_session *ses; struct swcr_encdec *swe; struct swcr_auth *swa; struct comp_algo *cxf; int error; ses = crypto_get_driver_session(cses); mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF); error = 0; swe = &ses->swcr_encdec; swa = &ses->swcr_auth; switch (csp->csp_mode) { case CSP_MODE_COMPRESS: switch (csp->csp_cipher_alg) { case CRYPTO_DEFLATE_COMP: cxf = &comp_algo_deflate; break; #ifdef INVARIANTS default: panic("bad compression algo"); #endif } ses->swcr_compdec.sw_cxf = cxf; ses->swcr_process = swcr_compdec; break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_NULL_CBC: ses->swcr_process = swcr_null; break; #ifdef INVARIANTS case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: panic("bad cipher algo"); #endif default: error = swcr_setup_cipher(ses, csp); if (error == 0) ses->swcr_process = swcr_encdec; } break; case CSP_MODE_DIGEST: error = swcr_setup_auth(ses, csp); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: error = swcr_setup_gcm(ses, csp); if (error == 0) ses->swcr_process = swcr_gcm; break; case CRYPTO_AES_CCM_16: error = swcr_setup_ccm(ses, csp); if (error == 0) ses->swcr_process = swcr_ccm; break; #ifdef INVARIANTS default: panic("bad aead algo"); #endif } break; case CSP_MODE_ETA: #ifdef INVARIANTS switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: panic("bad eta cipher algo"); } switch (csp->csp_auth_alg) { case CRYPTO_AES_NIST_GMAC: case CRYPTO_AES_CCM_CBC_MAC: panic("bad eta auth algo"); } #endif error = swcr_setup_auth(ses, csp); if (error) break; if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) { /* Effectively degrade to digest mode. */ ses->swcr_process = swcr_authcompute; break; } error = swcr_setup_cipher(ses, csp); if (error == 0) ses->swcr_process = swcr_eta; break; default: error = EINVAL; } if (error) swcr_freesession(dev, cses); return (error); } static void swcr_freesession(device_t dev, crypto_session_t cses) { struct swcr_session *ses; ses = crypto_get_driver_session(cses); mtx_destroy(&ses->swcr_lock); zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA); zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA); zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA); } /* * Process a software request. 
*/ static int swcr_process(device_t dev, struct cryptop *crp, int hint) { struct swcr_session *ses; ses = crypto_get_driver_session(crp->crp_session); mtx_lock(&ses->swcr_lock); crp->crp_etype = ses->swcr_process(ses, crp); mtx_unlock(&ses->swcr_lock); crypto_done(crp); return (0); } static void swcr_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "cryptosoft", -1) == NULL && BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0) panic("cryptosoft: could not attach"); } static int swcr_probe(device_t dev) { device_set_desc(dev, "software crypto"); return (BUS_PROBE_NOWILDCARD); } static int swcr_attach(device_t dev) { swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session), CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); if (swcr_id < 0) { device_printf(dev, "cannot initialize!"); return (ENXIO); } return (0); } static int swcr_detach(device_t dev) { crypto_unregister_all(swcr_id); return 0; } static device_method_t swcr_methods[] = { DEVMETHOD(device_identify, swcr_identify), DEVMETHOD(device_probe, swcr_probe), DEVMETHOD(device_attach, swcr_attach), DEVMETHOD(device_detach, swcr_detach), DEVMETHOD(cryptodev_probesession, swcr_probesession), DEVMETHOD(cryptodev_newsession, swcr_newsession), DEVMETHOD(cryptodev_freesession,swcr_freesession), DEVMETHOD(cryptodev_process, swcr_process), {0, 0}, }; static driver_t swcr_driver = { "cryptosoft", swcr_methods, 0, /* NB: no softc */ }; static devclass_t swcr_devclass; /* * NB: We explicitly reference the crypto module so we * get the necessary ordering when built as a loadable * module. This is required because we bundle the crypto * module code together with the cryptosoft driver (otherwise * normal module dependencies would handle things). */ extern int crypto_modevent(struct module *, int, void *); /* XXX where to attach */ DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0); MODULE_VERSION(cryptosoft, 1); MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
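
The block-at-a-time cursor walk used by swcr_ccm() above is a reusable pattern for
drivers that consume crypto_buffer_cursor: operate in place on a mapped segment when
it holds a full cipher block, and fall back to a bounce block otherwise. The fragment
below is a minimal illustrative sketch, not part of this change; the helper name
example_walk_payload and the placeholder per-block work are hypothetical, while the
cursor calls and cryptop fields it uses are the same ones exercised by swcr_ccm() in
this file (and documented in crypto_buffer(9)). AES_BLOCK_LEN is assumed to be
available as it is elsewhere in cryptosoft.c.

/*
 * Illustrative sketch only: walk a request's input payload block by
 * block with the crypto cursor API.
 */
static void
example_walk_payload(struct cryptop *crp)
{
	u_char blk[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc;
	const u_char *inblk;
	int i, resid;

	crypto_cursor_init(&cc, &crp->crp_buf);
	crypto_cursor_advance(&cc, crp->crp_payload_start);
	for (resid = crp->crp_payload_length; resid >= AES_BLOCK_LEN;
	     resid -= AES_BLOCK_LEN) {
		if (crypto_cursor_seglen(&cc) < AES_BLOCK_LEN) {
			/* Segment too short: assemble a contiguous block. */
			crypto_cursor_copydata(&cc, AES_BLOCK_LEN, blk);
			inblk = blk;
		} else {
			/* Operate directly on the mapped segment. */
			inblk = crypto_cursor_segbase(&cc);
			crypto_cursor_advance(&cc, AES_BLOCK_LEN);
		}
		/* Placeholder per-block work (hypothetical). */
		for (i = 0; i < AES_BLOCK_LEN; i++)
			(void)(inblk[i] ^ 0xff);
	}
	/* A trailing partial block (resid > 0) would be handled here. */
}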