diff --git a/share/man/man9/crypto_request.9 b/share/man/man9/crypto_request.9 index 6253e49dfb32..eb259f96be5e 100644 --- a/share/man/man9/crypto_request.9 +++ b/share/man/man9/crypto_request.9 @@ -1,529 +1,527 @@ .\" Copyright (c) 2020, Chelsio Inc .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions are met: .\" .\" 1. Redistributions of source code must retain the above copyright notice, .\" this list of conditions and the following disclaimer. .\" .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" 3. Neither the name of the Chelsio Inc nor the names of its .\" contributors may be used to endorse or promote products derived from .\" this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" .\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE .\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" .\" * Other names and brands may be claimed as the property of others. .\" .\" $FreeBSD$ .\" -.Dd August 12, 2020 +.Dd February 8, 2021 .Dt CRYPTO_REQUEST 9 .Os .Sh NAME .Nm crypto_request .Nd symmetric cryptographic operations .Sh SYNOPSIS .In opencrypto/cryptodev.h .Ft int .Fn crypto_dispatch "struct cryptop *crp" +.Ft int +.Fn crypto_dispatch_async "struct cryptop *crp" "int flags" +.Ft void +.Fn crypto_dispatch_batch "struct cryptopq *crpq" "int flags" .Ft void .Fn crypto_destroyreq "struct cryptop *crp" .Ft void .Fn crypto_freereq "struct cryptop *crp" .Ft "struct cryptop *" .Fn crypto_getreq "crypto_session_t cses" "int how" .Ft void .Fn crypto_initreq "struct cryptop *crp" "crypto_session_t cses" .Ft void .Fn crypto_use_buf "struct cryptop *crp" "void *buf" "int len" .Ft void .Fn crypto_use_mbuf "struct cryptop *crp" "struct mbuf *m" .Ft void .Fn crypto_use_uio "struct cryptop *crp" "struct uio *uio" .Ft void .Fn crypto_use_vmpage "struct cryptop *crp" "vm_page_t *pages" "int len" "int offset" .Ft void .Fn crypto_use_output_buf "struct cryptop *crp" "void *buf" "int len" .Ft void .Fn crypto_use_output_mbuf "struct cryptop *crp" "struct mbuf *m" .Ft void .Fn crypto_use_output_uio "struct cryptop *crp" "struct uio *uio" .Ft void .Fn crypto_use_output_vmpage "struct cryptop *crp" "vm_page_t *pages" "int len" "int offset" .Sh DESCRIPTION Each symmetric cryptographic operation in the kernel is described by an instance of .Vt struct cryptop and is associated with an active session. .Pp Requests can either be allocated dynamically or use caller-supplied storage. Dynamically allocated requests should be allocated by .Fn crypto_getreq and freed by .Fn crypto_freereq once the request has completed.
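.Pp
For example (a minimal sketch rather than a complete consumer), a dynamically
allocated request can be obtained, dispatched, and freed from its completion
callback.
The session
.Fa cses
and the
.Fn example_done
callback are hypothetical names used only for illustration:
.Bd -literal -offset indent
/* Completion callback: record errors and release the request. */
static int
example_done(struct cryptop *crp)
{

	if (crp->crp_etype != 0)
		printf("crypto request failed: %d\en", crp->crp_etype);
	crypto_freereq(crp);
	return (0);
}

struct cryptop *crp;
int error;

crp = crypto_getreq(cses, M_WAITOK);
/* ... describe buffers, regions, and the operation here ... */
crp->crp_callback = example_done;
error = crypto_dispatch(crp);
.Ed
.Pp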
Requests using caller-supplied storage should be initialized by .Fn crypto_initreq at the start of each operation and destroyed by .Fn crypto_destroyreq once the request has completed. .Pp For both .Fn crypto_getreq and .Fn crypto_initreq , .Fa cses is a reference to an active session. For .Fn crypto_getreq , .Fa how is passed to .Xr malloc 9 and should be set to either .Dv M_NOWAIT or .Dv M_WAITOK . .Pp Once a request has been initialized, the caller should set fields in the structure to describe request-specific parameters. Unused fields should be left as-is. .Pp -.Fn crypto_dispatch -passes a crypto request to the driver attached to the request's session. -If there are errors in the request's fields, this function may return -an error to the caller. +The +.Fn crypto_dispatch , +.Fn crypto_dispatch_async , +and +.Fn crypto_dispatch_batch +functions pass one or more crypto requests to the driver attached to the +request's session. +If there are errors in the request's fields, these functions may return an +error to the caller. If errors are encountered while servicing the request, they will instead be reported to the request's callback function .Pq Fa crp_callback via .Fa crp_etype . .Pp Note that a request's callback function may be invoked before .Fn crypto_dispatch returns. .Pp Once a request has signaled completion by invoking its callback function, it should be freed via .Fn crypto_destroyreq or .Fn crypto_freereq . .Pp Cryptographic operations include several fields to describe the request. .Ss Request Buffers Requests can either specify a single data buffer that is modified in place .Po .Fa crp_buf .Pc or separate input .Po .Fa crp_buf .Pc and output .Po .Fa crp_obuf .Pc buffers. Note that separate input and output buffers are not supported for compression mode requests. .Pp All requests must have a valid .Fa crp_buf initialized by one of the following functions: .Bl -tag -width "Fn crypto_use_vmpage" .It Fn crypto_use_buf Uses an array of .Fa len bytes pointed to by .Fa buf as the data buffer. .It Fn crypto_use_mbuf Uses the network memory buffer .Fa m as the data buffer. .It Fn crypto_use_uio Uses the scatter/gather list .Fa uio as the data buffer. .It Fn crypto_use_vmpage Uses the array of .Vt vm_page_t structures as the data buffer. .El .Pp One of the following functions should be used to initialize .Fa crp_obuf for requests that use separate input and output buffers: .Bl -tag -width "Fn crypto_use_output_vmpage" .It Fn crypto_use_output_buf Uses an array of .Fa len bytes pointed to by .Fa buf as the output buffer. .It Fn crypto_use_output_mbuf Uses the network memory buffer .Fa m as the output buffer. .It Fn crypto_use_output_uio Uses the scatter/gather list .Fa uio as the output buffer. .It Fn crypto_use_output_vmpage Uses the array of .Vt vm_page_t structures as the output buffer. .El .Ss Request Regions Each request describes one or more regions in the data buffers. Each region is described by an offset relative to the start of a data buffer and a length. The length of some regions is the same for all requests belonging to a session. Those lengths are set in the session parameters of the associated session. All requests must define a payload region. Other regions are only required for specific session modes. .Pp For requests with separate input and output data buffers, the AAD, IV, and payload regions are always defined as regions in the input buffer, and a separate payload output region is defined to hold the output of encryption or decryption in the output buffer. 
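.Pp
As a sketch (the buffer pointers and lengths are assumed to be supplied by the
caller), a request with distinct input and output buffers attaches them as
follows:
.Bd -literal -offset indent
crypto_use_buf(crp, inbuf, inlen);		/* input: crp_buf */
crypto_use_output_buf(crp, outbuf, outlen);	/* output: crp_obuf */
.Ed
.Pp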
The digest region describes a region in the input data buffer for requests that verify an existing digest. For requests that compute a digest, the digest region describes a region in the output data buffer. Note that the only data written to the output buffer is the encryption or decryption result and any computed digest. AAD and IV regions are not copied from the input buffer into the output buffer but are only used as inputs. .Pp The following regions are defined: .Bl -column "Payload Output" "Input/Output" .It Sy Region Ta Sy Buffer Ta Sy Description .It AAD Ta Input Ta Embedded Additional Authenticated Data .It IV Ta Input Ta Embedded IV or nonce .It Payload Ta Input Ta Data to encrypt, decrypt, compress, or decompress .It Payload Output Ta Output Ta Encrypted or decrypted data .It Digest Ta Input/Output Ta Authentication digest, hash, or tag .El .Bl -column "Payload Output" ".Fa crp_payload_output_start" .It Sy Region Ta Sy Start Ta Sy Length .It AAD Ta Fa crp_aad_start Ta Fa crp_aad_length .It IV Ta Fa crp_iv_start Ta Fa csp_ivlen .It Payload Ta Fa crp_payload_start Ta Fa crp_payload_length .It Payload Output Ta Fa crp_payload_output_start Ta Fa crp_payload_length .It Digest Ta Fa crp_digest_start Ta Fa csp_auth_mlen .El .Pp Requests are permitted to operate on only a subset of the data buffer. For example, requests from IPsec operate on network packets that include headers not used as either additional authentication data (AAD) or payload data. .Ss Request Operations All requests must specify the type of operation to perform in .Fa crp_op . Available operations depend on the session's mode. .Pp Compression requests support the following operations: .Bl -tag -width CRYPTO_OP_DECOMPRESS .It Dv CRYPTO_OP_COMPRESS Compress the data in the payload region of the data buffer. .It Dv CRYPTO_OP_DECOMPRESS Decompress the data in the payload region of the data buffer. .El .Pp Cipher requests support the following operations: .Bl -tag -width CRYPTO_OP_DECRYPT .It Dv CRYPTO_OP_ENCRYPT Encrypt the data in the payload region of the data buffer. .It Dv CRYPTO_OP_DECRYPT Decrypt the data in the payload region of the data buffer. .El .Pp Digest requests support the following operations: .Bl -tag -width CRYPTO_OP_COMPUTE_DIGEST .It Dv CRYPTO_OP_COMPUTE_DIGEST Calculate a digest over the payload region of the data buffer and store the result in the digest region. .It Dv CRYPTO_OP_VERIFY_DIGEST Calculate a digest over the payload region of the data buffer. Compare the calculated digest to the existing digest from the digest region. If the digests match, complete the request successfully. If the digests do not match, fail the request with .Er EBADMSG . .El .Pp AEAD and Encrypt-then-Authenticate requests support the following operations: .Bl -tag -width CRYPTO_OP .It Dv CRYPTO_OP_ENCRYPT | Dv CRYPTO_OP_COMPUTE_DIGEST Encrypt the data in the payload region of the data buffer. Calculate a digest over the AAD and payload regions and store the result in the data buffer. .It Dv CRYPTO_OP_DECRYPT | Dv CRYPTO_OP_VERIFY_DIGEST Calculate a digest over the AAD and payload regions of the data buffer. Compare the calculated digest to the existing digest from the digest region. If the digests match, decrypt the payload region. If the digests do not match, fail the request with .Er EBADMSG . .El .Ss Request AAD AEAD and Encrypt-then-Authenticate requests may optionally include Additional Authenticated Data. 
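.Pp
For example, an AEAD encryption request whose buffer holds 16 bytes of AAD
followed by 1024 bytes of payload, with the computed tag stored immediately
after the payload, might describe its regions as follows (a sketch with
illustrative offsets and lengths; nonce handling is described below):
.Bd -literal -offset indent
crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
crp->crp_aad_start = 0;
crp->crp_aad_length = 16;
crp->crp_payload_start = 16;
crp->crp_payload_length = 1024;
crp->crp_digest_start = 16 + 1024;
.Ed
.Pp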
AAD may either be supplied in the AAD region of the input buffer or as a single buffer pointed to by .Fa crp_aad . In either case, .Fa crp_aad_length always indicates the amount of AAD in bytes. .Ss Request ESN IPsec requests may optionally include Extended Sequence Numbers (ESN). ESN may either be supplied in .Fa crp_esn or as part of the AAD pointed to by .Fa crp_aad . .Pp If the ESN is stored in .Fa crp_esn , .Dv CSP_F_ESN should be set in .Fa csp_flags . This use case is intended for encrypt-and-authenticate mode, since the high-order 32 bits of the sequence number are appended after the Next Header (RFC 4303). .Pp AEAD modes supply the ESN in a separate AAD buffer (see e.g. RFC 4106, Chapter 5 AAD Construction). .Ss Request IV and/or Nonce Some cryptographic operations require an IV or nonce as an input. An IV may be stored either in the IV region of the data buffer or in .Fa crp_iv . By default, the IV is assumed to be stored in the IV region. If the IV is stored in .Fa crp_iv , .Dv CRYPTO_F_IV_SEPARATE should be set in .Fa crp_flags and .Fa crp_iv_start should be left as zero. .Pp Requests that store part, but not all, of the IV in the data buffer should store the partial IV in the data buffer and pass the full IV separately in .Fa crp_iv . .Ss Request and Callback Scheduling The crypto framework provides multiple methods of scheduling the dispatch of requests to drivers along with the processing of driver callbacks. -Requests use flags in -.Fa crp_flags -to select the desired scheduling methods. +The +.Fn crypto_dispatch , +.Fn crypto_dispatch_async , +and +.Fn crypto_dispatch_batch +functions can be used to request different dispatch scheduling policies. .Pp .Fn crypto_dispatch -can pass the request to the session's driver via three different methods: -.Bl -enum -.It -The request is queued to a taskqueue backed by a pool of worker threads. +synchronously passes the request to the driver. +The driver itself may process the request synchronously or asynchronously +depending on whether the driver is implemented by software or hardware. +.Pp +.Fn crypto_dispatch_async +dispatches the request asynchronously. +If the driver is inherently synchronous, the request is queued to a taskqueue +backed by a pool of worker threads. +This can increase throughput by allowing requests from a single producer to be +processed in parallel. By default the pool is sized to provide one thread for each CPU. -Worker threads dequeue requests and pass them to the driver -asynchronously. -.It -The request is passed to the driver synchronously in the context of the -thread invoking +Worker threads dequeue requests and pass them to the driver asynchronously. +.Fn crypto_dispatch_async +additionally takes a +.Fa flags +parameter. +The +.Dv CRYPTO_ASYNC_ORDERED +flag indicates that completion callbacks for requests must be called in the +same order as requests were dispatched. +If the driver is asynchronous, the behavior of +.Fn crypto_dispatch_async +is identical to that of .Fn crypto_dispatch . -.It -The request is queued to a queue of pending requests. -A single worker thread dequeues requests and passes them to the driver -asynchronously. -.El .Pp -To select the first method (taskqueue backed by multiple threads), -requests should set -.Dv CRYPTO_F_ASYNC . -To always use the third method (queue to single worker thread), -requests should set -.Dv CRYPTO_F_BATCH . -If both flags are set, -.Dv CRYPTO_F_ASYNC -takes precedence.
-If neither flag is set, -.Fn crypto_dispatch -will first attempt the second method (invoke driver synchronously). -If the driver is blocked, -the request will be queued using the third method. -One caveat is that the first method is only used for requests using software -drivers which use host CPUs to process requests. -Requests whose session is associated with a hardware driver will ignore -.Dv CRYPTO_F_ASYNC -and only use -.Dv CRYPTO_F_BATCH -to determine how requests should be scheduled. -.Pp -In addition to bypassing synchronous dispatch in -.Fn crypto_dispatch , -.Dv CRYPTO_F_BATCH -requests additional changes aimed at optimizing batches of requests to -the same driver. -When the worker thread processes a request with -.Dv CRYPTO_F_BATCH , -it will search the pending request queue for any other requests for the same -driver, -including requests from different sessions. -If any other requests are present, +.Fn crypto_dispatch_batch +allows the caller to collect a batch of requests and submit them to the driver +at the same time. +This allows hardware drivers to optimize the scheduling of request processing +and batch completion interrupts. +A batch is submitted to the driver by invoking the driver's process method on +each request, specifying .Dv CRYPTO_HINT_MORE -is passed to the driver's process method. -Drivers may use this to batch completion interrupts. +with each request except for the last. +The +.Fa flags +parameter to +.Fn crypto_dispatch_batch +is currently ignored. .Pp Callback function scheduling is simpler than request scheduling. Callbacks can either be invoked synchronously from .Fn crypto_done , or they can be queued to a pool of worker threads. This pool of worker threads is also sized to provide one worker thread for each CPU by default. Note that a callback function invoked synchronously from .Fn crypto_done must follow the same restrictions placed on threaded interrupt handlers. .Pp By default, callbacks are invoked asynchronously by a worker thread. If .Dv CRYPTO_F_CBIMM is set, the callback is always invoked synchronously from .Fn crypto_done . If .Dv CRYPTO_F_CBIFSYNC is set, the callback is invoked synchronously if the request was processed by a software driver or asynchronously if the request was processed by a hardware driver. .Pp If a request was dispatched asynchronously via .Fn crypto_dispatch_async , callbacks are always invoked asynchronously, ignoring .Dv CRYPTO_F_CBIMM and .Dv CRYPTO_F_CBIFSYNC . In this case, the .Dv CRYPTO_ASYNC_ORDERED flag may be set to ensure that callbacks for requests on a given session are invoked in the same order that requests were dispatched. This flag is used by IPsec to ensure that decrypted network packets are passed up the network stack in roughly the same order they were received. .Ss Other Request Fields In addition to the fields and flags enumerated above, .Vt struct cryptop includes the following: .Bl -tag -width crp_payload_length .It Fa crp_session A reference to the active session. This is set when the request is created by .Fn crypto_getreq or initialized by .Fn crypto_initreq and should not be modified. Drivers can use this to fetch driver-specific session state or session parameters. .It Fa crp_etype Error status. Either zero on success, or an error if a request fails. Set by drivers prior to completing a request via .Fn crypto_done . .It Fa crp_flags A bitmask of flags.
The following flags are available in addition to flags discussed previously: .Bl -tag -width CRYPTO_F_DONE .It Dv CRYPTO_F_DONE Set by .Fn crypto_done before calling .Fa crp_callback . This flag is not very useful and will likely be removed in the future. It can only be safely checked from the callback routine at which point it is always set. .El .It Fa crp_cipher_key Pointer to a request-specific encryption key. If this value is not set, the request uses the session encryption key. .It Fa crp_auth_key Pointer to a request-specific authentication key. If this value is not set, the request uses the session authentication key. .It Fa crp_opaque An opaque pointer. This pointer permits users of the cryptographic framework to store information about a request to be used in the callback. .It Fa crp_callback Callback function. This must point to a callback function of type .Vt int (*)(struct cryptop *) . The callback function should inspect .Fa crp_etype to determine the status of the completed operation. It should also arrange for the request to be freed via .Fn crypto_freereq or destroyed via .Fn crypto_destroyreq . .It Fa crp_olen Used with compression and decompression requests to describe the updated length of the payload region in the data buffer. .Pp If a compression request increases the size of the payload, then the data buffer is unmodified, the request completes successfully, and .Fa crp_olen is set to the size the compressed data would have used. Callers can compare this to the payload region length to determine if the compressed data was discarded. .El .Sh RETURN VALUES .Fn crypto_dispatch returns an error if the request contained invalid fields, or zero if the request was valid. .Fn crypto_getreq returns a pointer to a new request structure on success, or .Dv NULL on failure. .Dv NULL can only be returned if .Dv M_NOWAIT was passed in .Fa how . .Sh SEE ALSO .Xr ipsec 4 , .Xr crypto 7 , .Xr crypto 9 , .Xr crypto_session 9 , .Xr mbuf 9 , .Xr uio 9 .Sh BUGS Not all drivers properly handle mixing session and per-request keys within a single session. Consumers should either use a single key for a session specified in the session parameters or always use per-request keys. diff --git a/sys/geom/eli/g_eli_integrity.c b/sys/geom/eli/g_eli_integrity.c index 4cf982e3ddfa..e79ec136aa2e 100644 --- a/sys/geom/eli/g_eli_integrity.c +++ b/sys/geom/eli/g_eli_integrity.c @@ -1,556 +1,566 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005-2011 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * The data layout description when integrity verification is configured. * * One of the most important assumption here is that authenticated data and its * HMAC has to be stored in the same place (namely in the same sector) to make * it work reliable. * The problem is that file systems work only with sectors that are multiple of * 512 bytes and a power of two number. * My idea to implement it is as follows. * Let's store HMAC in sector. This is a must. This leaves us 480 bytes for * data. We can't use that directly (ie. we can't create provider with 480 bytes * sector size). We need another sector from where we take only 32 bytes of data * and we store HMAC of this data as well. This takes two sectors from the * original provider at the input and leaves us one sector of authenticated data * at the output. Not very efficient, but you got the idea. * Now, let's assume, we want to create provider with 4096 bytes sector. * To output 4096 bytes of authenticated data we need 8x480 plus 1x256, so we * need nine 512-bytes sectors at the input to get one 4096-bytes sector at the * output. That's better. With 4096 bytes sector we can use 89% of size of the * original provider. I find it as an acceptable cost. * The reliability comes from the fact, that every HMAC stored inside the sector * is calculated only for the data in the same sector, so its impossible to * write new data and leave old HMAC or vice versa. * * And here is the picture: * * da0: +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+ * |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |480b| |32b |256b | * |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data| |HMAC|Data | * +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+----+ +----+-----+ * |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |512 bytes| |288 bytes | * +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ +---------+ |224 unused| * +----------+ * da0.eli: +----+----+----+----+----+----+----+----+----+ * |480b|480b|480b|480b|480b|480b|480b|480b|256b| * +----+----+----+----+----+----+----+----+----+ * | 4096 bytes | * +--------------------------------------------+ * * PS. You can use any sector size with geli(8). My example is using 4kB, * because it's most efficient. For 8kB sectors you need 2 extra sectors, * so the cost is the same as for 4kB sectors. 
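 * For example, to fill one 4096-byte geli sector we read nine 512-byte
 * sectors: eight of them contribute 8 * 480 = 3840 data bytes, and the ninth
 * contributes the remaining 4096 - 3840 = 256 bytes, using 32 + 256 = 288 of
 * its 512 bytes (224 bytes unused, as pictured above).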
*/ /* * Code paths: * BIO_READ: * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> g_eli_auth_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> g_eli_auth_run -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ MALLOC_DECLARE(M_ELI); /* * Here we generate key for HMAC. Every sector has its own HMAC key, so it is * not possible to copy sectors. * We cannot depend on fact, that every sector has its own IV, because different * IV doesn't change HMAC, when we use encrypt-then-authenticate method. */ static void g_eli_auth_keygen(struct g_eli_softc *sc, off_t offset, u_char *key) { SHA256_CTX ctx; /* Copy precalculated SHA256 context. */ bcopy(&sc->sc_akeyctx, &ctx, sizeof(ctx)); SHA256_Update(&ctx, (uint8_t *)&offset, sizeof(offset)); SHA256_Final(key, &ctx); } /* * The function is called after we read and decrypt data. * * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> G_ELI_AUTH_READ_DONE -> g_io_deliver */ static int g_eli_auth_read_done(struct cryptop *crp) { struct g_eli_softc *sc; struct bio *bp; if (crp->crp_etype == EAGAIN) { if (g_eli_crypto_rerun(crp) == 0) return (0); } bp = (struct bio *)crp->crp_opaque; bp->bio_inbed++; sc = bp->bio_to->geom->softc; if (crp->crp_etype == 0) { bp->bio_completed += crp->crp_payload_length; G_ELI_DEBUG(3, "Crypto READ request done (%d/%d) (add=%d completed=%jd).", bp->bio_inbed, bp->bio_children, crp->crp_payload_length, (intmax_t)bp->bio_completed); } else { u_int nsec, decr_secsize, encr_secsize, rel_sec; int *errorp; /* Sectorsize of decrypted provider eg. 4096. */ decr_secsize = bp->bio_to->sectorsize; /* The real sectorsize of encrypted provider, eg. 512. */ encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize; /* Number of sectors from decrypted provider, eg. 2. */ nsec = bp->bio_length / decr_secsize; /* Number of sectors from encrypted provider, eg. 18. */ nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; /* Which relative sector this request decrypted. */ rel_sec = ((crp->crp_buf.cb_buf + crp->crp_payload_start) - (char *)bp->bio_driver2) / encr_secsize; errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec + sizeof(int) * rel_sec); *errorp = crp->crp_etype; G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.", bp->bio_inbed, bp->bio_children, crp->crp_etype); if (bp->bio_error == 0 || bp->bio_error == EINTEGRITY) bp->bio_error = crp->crp_etype == EBADMSG ? EINTEGRITY : crp->crp_etype; } if (crp->crp_cipher_key != NULL) g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key)); crypto_freereq(crp); /* * Do we have all sectors already? */ if (bp->bio_inbed < bp->bio_children) return (0); if (bp->bio_error == 0) { u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize; u_char *srcdata, *dstdata; /* Sectorsize of decrypted provider eg. 4096. */ decr_secsize = bp->bio_to->sectorsize; /* The real sectorsize of encrypted provider, eg. 512. */ encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize; /* Number of data bytes in one encrypted sector, eg. 480. */ data_secsize = sc->sc_data_per_sector; /* Number of sectors from decrypted provider, eg. 2. */ nsec = bp->bio_length / decr_secsize; /* Number of sectors from encrypted provider, eg. 18. */ nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; /* Last sector number in every big sector, eg. 9. 
*/ lsec = sc->sc_bytes_per_sector / encr_secsize; srcdata = bp->bio_driver2; dstdata = bp->bio_data; for (i = 1; i <= nsec; i++) { data_secsize = sc->sc_data_per_sector; if ((i % lsec) == 0) data_secsize = decr_secsize % data_secsize; bcopy(srcdata + sc->sc_alen, dstdata, data_secsize); srcdata += encr_secsize; dstdata += data_secsize; } } else if (bp->bio_error == EINTEGRITY) { u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize; int *errorp; off_t coroff, corsize, dstoff; /* Sectorsize of decrypted provider eg. 4096. */ decr_secsize = bp->bio_to->sectorsize; /* The real sectorsize of encrypted provider, eg. 512. */ encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize; /* Number of data bytes in one encrypted sector, eg. 480. */ data_secsize = sc->sc_data_per_sector; /* Number of sectors from decrypted provider, eg. 2. */ nsec = bp->bio_length / decr_secsize; /* Number of sectors from encrypted provider, eg. 18. */ nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; /* Last sector number in every big sector, eg. 9. */ lsec = sc->sc_bytes_per_sector / encr_secsize; errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec); coroff = -1; corsize = 0; dstoff = bp->bio_offset; for (i = 1; i <= nsec; i++) { data_secsize = sc->sc_data_per_sector; if ((i % lsec) == 0) data_secsize = decr_secsize % data_secsize; if (errorp[i - 1] == EBADMSG) { /* * Corruption detected, remember the offset if * this is the first corrupted sector and * increase size. */ if (coroff == -1) coroff = dstoff; corsize += data_secsize; } else { /* * No corruption, good. * Report previous corruption if there was one. */ if (coroff != -1) { G_ELI_DEBUG(0, "%s: Failed to authenticate %jd " "bytes of data at offset %jd.", sc->sc_name, (intmax_t)corsize, (intmax_t)coroff); coroff = -1; corsize = 0; } } dstoff += data_secsize; } /* Report previous corruption if there was one. */ if (coroff != -1) { G_ELI_DEBUG(0, "%s: Failed to authenticate %jd " "bytes of data at offset %jd.", sc->sc_name, (intmax_t)corsize, (intmax_t)coroff); } } free(bp->bio_driver2, M_ELI); bp->bio_driver2 = NULL; if (bp->bio_error != 0) { if (bp->bio_error != EINTEGRITY) { G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).", bp->bio_error); } bp->bio_completed = 0; } /* * Read is finished, send it up. */ g_io_deliver(bp, bp->bio_error); atomic_subtract_int(&sc->sc_inflight, 1); return (0); } /* * The function is called after data encryption. * * g_eli_start -> g_eli_auth_run -> G_ELI_AUTH_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver */ static int g_eli_auth_write_done(struct cryptop *crp) { struct g_eli_softc *sc; struct g_consumer *cp; struct bio *bp, *cbp, *cbp2; u_int nsec; if (crp->crp_etype == EAGAIN) { if (g_eli_crypto_rerun(crp) == 0) return (0); } bp = (struct bio *)crp->crp_opaque; bp->bio_inbed++; if (crp->crp_etype == 0) { G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).", bp->bio_inbed, bp->bio_children); } else { G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.", bp->bio_inbed, bp->bio_children, crp->crp_etype); if (bp->bio_error == 0) bp->bio_error = crp->crp_etype; } sc = bp->bio_to->geom->softc; if (crp->crp_cipher_key != NULL) g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key)); crypto_freereq(crp); /* * All sectors are already encrypted? 
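 * If not, return and let the remaining per-sector requests complete; the
 * last completion callback performs the work below.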
*/ if (bp->bio_inbed < bp->bio_children) return (0); if (bp->bio_error != 0) { G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).", bp->bio_error); free(bp->bio_driver2, M_ELI); bp->bio_driver2 = NULL; cbp = bp->bio_driver1; bp->bio_driver1 = NULL; g_destroy_bio(cbp); g_io_deliver(bp, bp->bio_error); atomic_subtract_int(&sc->sc_inflight, 1); return (0); } cp = LIST_FIRST(&sc->sc_geom->consumer); cbp = bp->bio_driver1; bp->bio_driver1 = NULL; cbp->bio_to = cp->provider; cbp->bio_done = g_eli_write_done; /* Number of sectors from decrypted provider, eg. 1. */ nsec = bp->bio_length / bp->bio_to->sectorsize; /* Number of sectors from encrypted provider, eg. 9. */ nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize; cbp->bio_length = cp->provider->sectorsize * nsec; cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; cbp->bio_data = bp->bio_driver2; /* * We write more than what is requested, so we have to be ready to write * more than maxphys. */ cbp2 = NULL; if (cbp->bio_length > maxphys) { cbp2 = g_duplicate_bio(bp); cbp2->bio_length = cbp->bio_length - maxphys; cbp2->bio_data = cbp->bio_data + maxphys; cbp2->bio_offset = cbp->bio_offset + maxphys; cbp2->bio_to = cp->provider; cbp2->bio_done = g_eli_write_done; cbp->bio_length = maxphys; } /* * Send encrypted data to the provider. */ G_ELI_LOGREQ(2, cbp, "Sending request."); bp->bio_inbed = 0; bp->bio_children = (cbp2 != NULL ? 2 : 1); g_io_request(cbp, cp); if (cbp2 != NULL) { G_ELI_LOGREQ(2, cbp2, "Sending request."); g_io_request(cbp2, cp); } return (0); } void g_eli_auth_read(struct g_eli_softc *sc, struct bio *bp) { struct g_consumer *cp; struct bio *cbp, *cbp2; size_t size; off_t nsec; bp->bio_pflags = 0; cp = LIST_FIRST(&sc->sc_geom->consumer); cbp = bp->bio_driver1; bp->bio_driver1 = NULL; cbp->bio_to = cp->provider; cbp->bio_done = g_eli_read_done; /* Number of sectors from decrypted provider, eg. 1. */ nsec = bp->bio_length / bp->bio_to->sectorsize; /* Number of sectors from encrypted provider, eg. 9. */ nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize; cbp->bio_length = cp->provider->sectorsize * nsec; size = cbp->bio_length; size += sizeof(int) * nsec; size += G_ELI_AUTH_SECKEYLEN * nsec; cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; bp->bio_driver2 = malloc(size, M_ELI, M_WAITOK); cbp->bio_data = bp->bio_driver2; /* Clear the error array. */ memset((char *)bp->bio_driver2 + cbp->bio_length, 0, sizeof(int) * nsec); /* * We read more than what is requested, so we have to be ready to read * more than maxphys. */ cbp2 = NULL; if (cbp->bio_length > maxphys) { cbp2 = g_duplicate_bio(bp); cbp2->bio_length = cbp->bio_length - maxphys; cbp2->bio_data = cbp->bio_data + maxphys; cbp2->bio_offset = cbp->bio_offset + maxphys; cbp2->bio_to = cp->provider; cbp2->bio_done = g_eli_read_done; cbp->bio_length = maxphys; } /* * Read encrypted data from provider. */ G_ELI_LOGREQ(2, cbp, "Sending request."); g_io_request(cbp, cp); if (cbp2 != NULL) { G_ELI_LOGREQ(2, cbp2, "Sending request."); g_io_request(cbp2, cp); } } /* * This is the main function responsible for cryptography (ie. communication * with crypto(9) subsystem). 
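 * When the g_eli_batch sysctl is enabled, the per-sector requests built here
 * are linked onto a local cryptopq and handed to crypto_dispatch_batch() in
 * one call; otherwise each request is passed to crypto_dispatch() directly.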
* * BIO_READ: * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> G_ELI_AUTH_RUN -> g_eli_auth_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> G_ELI_AUTH_RUN -> g_eli_auth_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ void g_eli_auth_run(struct g_eli_worker *wr, struct bio *bp) { struct g_eli_softc *sc; + struct cryptopq crpq; struct cryptop *crp; u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize; off_t dstoff; u_char *p, *data, *authkey, *plaindata; int error; + bool batch; G_ELI_LOGREQ(3, bp, "%s", __func__); bp->bio_pflags = wr->w_number; sc = wr->w_softc; /* Sectorsize of decrypted provider eg. 4096. */ decr_secsize = bp->bio_to->sectorsize; /* The real sectorsize of encrypted provider, eg. 512. */ encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize; /* Number of data bytes in one encrypted sector, eg. 480. */ data_secsize = sc->sc_data_per_sector; /* Number of sectors from decrypted provider, eg. 2. */ nsec = bp->bio_length / decr_secsize; /* Number of sectors from encrypted provider, eg. 18. */ nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize; /* Last sector number in every big sector, eg. 9. */ lsec = sc->sc_bytes_per_sector / encr_secsize; /* Destination offset, used for IV generation. */ dstoff = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector; plaindata = bp->bio_data; if (bp->bio_cmd == BIO_READ) { data = bp->bio_driver2; p = data + encr_secsize * nsec; p += sizeof(int) * nsec; } else { size_t size; size = encr_secsize * nsec; size += G_ELI_AUTH_SECKEYLEN * nsec; size += sizeof(uintptr_t); /* Space for alignment. */ data = malloc(size, M_ELI, M_WAITOK); bp->bio_driver2 = data; p = data + encr_secsize * nsec; } bp->bio_inbed = 0; bp->bio_children = nsec; #if defined(__mips_n64) || defined(__mips_o64) p = (char *)roundup((uintptr_t)p, sizeof(uintptr_t)); #endif + TAILQ_INIT(&crpq); + batch = atomic_load_int(&g_eli_batch) != 0; + for (i = 1; i <= nsec; i++, dstoff += encr_secsize) { crp = crypto_getreq(wr->w_sid, M_WAITOK); authkey = (u_char *)p; p += G_ELI_AUTH_SECKEYLEN; data_secsize = sc->sc_data_per_sector; if ((i % lsec) == 0) { data_secsize = decr_secsize % data_secsize; /* * Last encrypted sector of each decrypted sector is * only partially filled. 
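 * It carries only decr_secsize % sc_data_per_sector data bytes (eg.
 * 4096 % 480 = 256), so on writes the unused tail is zeroed below.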
*/ if (bp->bio_cmd == BIO_WRITE) memset(data + sc->sc_alen + data_secsize, 0, encr_secsize - sc->sc_alen - data_secsize); } if (bp->bio_cmd == BIO_WRITE) { bcopy(plaindata, data + sc->sc_alen, data_secsize); plaindata += data_secsize; } crypto_use_buf(crp, data, sc->sc_alen + data_secsize); crp->crp_opaque = (void *)bp; data += encr_secsize; crp->crp_flags = CRYPTO_F_CBIFSYNC; - if (g_eli_batch) - crp->crp_flags |= CRYPTO_F_BATCH; if (bp->bio_cmd == BIO_WRITE) { crp->crp_callback = g_eli_auth_write_done; crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST; } else { crp->crp_callback = g_eli_auth_read_done; crp->crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST; } crp->crp_digest_start = 0; crp->crp_payload_start = sc->sc_alen; crp->crp_payload_length = data_secsize; if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) == 0) { crp->crp_cipher_key = g_eli_key_hold(sc, dstoff, encr_secsize); } if (g_eli_ivlen(sc->sc_ealgo) != 0) { crp->crp_flags |= CRYPTO_F_IV_SEPARATE; g_eli_crypto_ivgen(sc, dstoff, crp->crp_iv, sizeof(crp->crp_iv)); } g_eli_auth_keygen(sc, dstoff, authkey); crp->crp_auth_key = authkey; - error = crypto_dispatch(crp); - KASSERT(error == 0, ("crypto_dispatch() failed (error=%d)", - error)); + if (batch) { + TAILQ_INSERT_TAIL(&crpq, crp, crp_next); + } else { + error = crypto_dispatch(crp); + KASSERT(error == 0, + ("crypto_dispatch() failed (error=%d)", error)); + } } + + if (batch) + crypto_dispatch_batch(&crpq, 0); } diff --git a/sys/geom/eli/g_eli_privacy.c b/sys/geom/eli/g_eli_privacy.c index adb353441e3d..f4e0416cc828 100644 --- a/sys/geom/eli/g_eli_privacy.c +++ b/sys/geom/eli/g_eli_privacy.c @@ -1,347 +1,355 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005-2011 Pawel Jakub Dawidek * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Code paths: * BIO_READ: * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ MALLOC_DECLARE(M_ELI); /* * Copy data from a (potentially unmapped) bio to a kernelspace buffer. * * The buffer must have at least as much room as bp->bio_length. */ static void g_eli_bio_copyin(struct bio *bp, void *kaddr) { struct uio uio; struct iovec iov[1]; iov[0].iov_base = kaddr; iov[0].iov_len = bp->bio_length; uio.uio_iov = iov; uio.uio_iovcnt = 1; uio.uio_offset = 0; uio.uio_resid = bp->bio_length; uio.uio_segflg = UIO_SYSSPACE; uio.uio_rw = UIO_READ; uiomove_fromphys(bp->bio_ma, bp->bio_ma_offset, bp->bio_length, &uio); } /* * The function is called after we read and decrypt data. * * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> G_ELI_CRYPTO_READ_DONE -> g_io_deliver */ static int g_eli_crypto_read_done(struct cryptop *crp) { struct g_eli_softc *sc; struct bio *bp; if (crp->crp_etype == EAGAIN) { if (g_eli_crypto_rerun(crp) == 0) return (0); } bp = (struct bio *)crp->crp_opaque; bp->bio_inbed++; if (crp->crp_etype == 0) { G_ELI_DEBUG(3, "Crypto READ request done (%d/%d).", bp->bio_inbed, bp->bio_children); bp->bio_completed += crp->crp_payload_length; } else { G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.", bp->bio_inbed, bp->bio_children, crp->crp_etype); if (bp->bio_error == 0) bp->bio_error = crp->crp_etype; } sc = bp->bio_to->geom->softc; if (sc != NULL && crp->crp_cipher_key != NULL) g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key)); crypto_freereq(crp); /* * Do we have all sectors already? */ if (bp->bio_inbed < bp->bio_children) return (0); if (bp->bio_error != 0) { G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).", bp->bio_error); bp->bio_completed = 0; } /* * Read is finished, send it up. */ g_io_deliver(bp, bp->bio_error); if (sc != NULL) atomic_subtract_int(&sc->sc_inflight, 1); return (0); } /* * The function is called after data encryption. * * g_eli_start -> g_eli_crypto_run -> G_ELI_CRYPTO_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver */ static int g_eli_crypto_write_done(struct cryptop *crp) { struct g_eli_softc *sc; struct g_geom *gp; struct g_consumer *cp; struct bio *bp, *cbp; if (crp->crp_etype == EAGAIN) { if (g_eli_crypto_rerun(crp) == 0) return (0); } bp = (struct bio *)crp->crp_opaque; bp->bio_inbed++; if (crp->crp_etype == 0) { G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).", bp->bio_inbed, bp->bio_children); } else { G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.", bp->bio_inbed, bp->bio_children, crp->crp_etype); if (bp->bio_error == 0) bp->bio_error = crp->crp_etype; } gp = bp->bio_to->geom; sc = gp->softc; if (crp->crp_cipher_key != NULL) g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key)); crypto_freereq(crp); /* * All sectors are already encrypted? 
*/ if (bp->bio_inbed < bp->bio_children) return (0); bp->bio_inbed = 0; bp->bio_children = 1; cbp = bp->bio_driver1; bp->bio_driver1 = NULL; if (bp->bio_error != 0) { G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).", bp->bio_error); free(bp->bio_driver2, M_ELI); bp->bio_driver2 = NULL; g_destroy_bio(cbp); g_io_deliver(bp, bp->bio_error); atomic_subtract_int(&sc->sc_inflight, 1); return (0); } cbp->bio_data = bp->bio_driver2; /* * Clear BIO_UNMAPPED, which was inherited from where we cloned the bio * in g_eli_start, because we manually set bio_data */ cbp->bio_flags &= ~BIO_UNMAPPED; cbp->bio_done = g_eli_write_done; cp = LIST_FIRST(&gp->consumer); cbp->bio_to = cp->provider; G_ELI_LOGREQ(2, cbp, "Sending request."); /* * Send encrypted data to the provider. */ g_io_request(cbp, cp); return (0); } /* * The function is called to read encrypted data. * * g_eli_start -> G_ELI_CRYPTO_READ -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver */ void g_eli_crypto_read(struct g_eli_softc *sc, struct bio *bp, boolean_t fromworker) { struct g_consumer *cp; struct bio *cbp; if (!fromworker) { /* * We are not called from the worker thread, so check if * device is suspended. */ mtx_lock(&sc->sc_queue_mtx); if (sc->sc_flags & G_ELI_FLAG_SUSPEND) { /* * If device is suspended, we place the request onto * the queue, so it can be handled after resume. */ G_ELI_DEBUG(0, "device suspended, move onto queue"); bioq_insert_tail(&sc->sc_queue, bp); mtx_unlock(&sc->sc_queue_mtx); wakeup(sc); return; } atomic_add_int(&sc->sc_inflight, 1); mtx_unlock(&sc->sc_queue_mtx); } bp->bio_pflags = 0; bp->bio_driver2 = NULL; cbp = bp->bio_driver1; cbp->bio_done = g_eli_read_done; cp = LIST_FIRST(&sc->sc_geom->consumer); cbp->bio_to = cp->provider; G_ELI_LOGREQ(2, cbp, "Sending request."); /* * Read encrypted data from provider. */ g_io_request(cbp, cp); } /* * This is the main function responsible for cryptography (ie. communication * with crypto(9) subsystem). * * BIO_READ: * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> G_ELI_CRYPTO_RUN -> g_eli_crypto_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> G_ELI_CRYPTO_RUN -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ void g_eli_crypto_run(struct g_eli_worker *wr, struct bio *bp) { struct g_eli_softc *sc; + struct cryptopq crpq; struct cryptop *crp; vm_page_t *pages; u_int i, nsec, secsize; off_t dstoff; u_char *data = NULL; - int error; - int pages_offset; + int error, pages_offset; + bool batch; G_ELI_LOGREQ(3, bp, "%s", __func__); bp->bio_pflags = wr->w_number; sc = wr->w_softc; secsize = LIST_FIRST(&sc->sc_geom->provider)->sectorsize; nsec = bp->bio_length / secsize; bp->bio_inbed = 0; bp->bio_children = nsec; /* * If we write the data we cannot destroy current bio_data content, * so we need to allocate more memory for encrypted data. */ if (bp->bio_cmd == BIO_WRITE) { data = malloc(bp->bio_length, M_ELI, M_WAITOK); bp->bio_driver2 = data; /* * This copy could be eliminated by using crypto's output * buffer, instead of using a single overwriting buffer. 
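 * (ie. by keeping the source bio as the input buffer and attaching the
 * scratch buffer as a separate output buffer via crypto_use_output_buf()).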
*/ if ((bp->bio_flags & BIO_UNMAPPED) != 0) g_eli_bio_copyin(bp, data); else bcopy(bp->bio_data, data, bp->bio_length); } else { if ((bp->bio_flags & BIO_UNMAPPED) != 0) { pages = bp->bio_ma; pages_offset = bp->bio_ma_offset; } else { data = bp->bio_data; } } + TAILQ_INIT(&crpq); + batch = atomic_load_int(&g_eli_batch) != 0; + for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) { crp = crypto_getreq(wr->w_sid, M_WAITOK); if (data) { crypto_use_buf(crp, data, secsize); data += secsize; } else { MPASS(pages != NULL); crypto_use_vmpage(crp, pages, secsize, pages_offset); pages_offset += secsize; pages += pages_offset >> PAGE_SHIFT; pages_offset &= PAGE_MASK; } crp->crp_opaque = (void *)bp; if (bp->bio_cmd == BIO_WRITE) { crp->crp_op = CRYPTO_OP_ENCRYPT; crp->crp_callback = g_eli_crypto_write_done; } else /* if (bp->bio_cmd == BIO_READ) */ { crp->crp_op = CRYPTO_OP_DECRYPT; crp->crp_callback = g_eli_crypto_read_done; } crp->crp_flags = CRYPTO_F_CBIFSYNC; - if (g_eli_batch) - crp->crp_flags |= CRYPTO_F_BATCH; - crp->crp_payload_start = 0; crp->crp_payload_length = secsize; if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) == 0) { crp->crp_cipher_key = g_eli_key_hold(sc, dstoff, secsize); } if (g_eli_ivlen(sc->sc_ealgo) != 0) { crp->crp_flags |= CRYPTO_F_IV_SEPARATE; g_eli_crypto_ivgen(sc, dstoff, crp->crp_iv, sizeof(crp->crp_iv)); } - error = crypto_dispatch(crp); - KASSERT(error == 0, ("crypto_dispatch() failed (error=%d)", - error)); + if (batch) { + TAILQ_INSERT_TAIL(&crpq, crp, crp_next); + } else { + error = crypto_dispatch(crp); + KASSERT(error == 0, + ("crypto_dispatch() failed (error=%d)", error)); + } } + + if (batch) + crypto_dispatch_batch(&crpq, 0); } diff --git a/sys/kgssapi/krb5/kcrypto_aes.c b/sys/kgssapi/krb5/kcrypto_aes.c index 38faeb37066c..ecc8603036fe 100644 --- a/sys/kgssapi/krb5/kcrypto_aes.c +++ b/sys/kgssapi/krb5/kcrypto_aes.c @@ -1,382 +1,382 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "kcrypto.h" struct aes_state { struct mtx as_lock; crypto_session_t as_session_aes; crypto_session_t as_session_sha1; }; static void aes_init(struct krb5_key_state *ks) { struct aes_state *as; as = malloc(sizeof(struct aes_state), M_GSSAPI, M_WAITOK|M_ZERO); mtx_init(&as->as_lock, "gss aes lock", NULL, MTX_DEF); ks->ks_priv = as; } static void aes_destroy(struct krb5_key_state *ks) { struct aes_state *as = ks->ks_priv; if (as->as_session_aes != 0) crypto_freesession(as->as_session_aes); if (as->as_session_sha1 != 0) crypto_freesession(as->as_session_sha1); mtx_destroy(&as->as_lock); free(ks->ks_priv, M_GSSAPI); } static void aes_set_key(struct krb5_key_state *ks, const void *in) { void *kp = ks->ks_key; struct aes_state *as = ks->ks_priv; struct crypto_session_params csp; if (kp != in) bcopy(in, kp, ks->ks_class->ec_keylen); if (as->as_session_aes != 0) crypto_freesession(as->as_session_aes); if (as->as_session_sha1 != 0) crypto_freesession(as->as_session_sha1); /* * We only want the first 96 bits of the HMAC. */ memset(&csp, 0, sizeof(csp)); csp.csp_mode = CSP_MODE_DIGEST; csp.csp_auth_alg = CRYPTO_SHA1_HMAC; csp.csp_auth_klen = ks->ks_class->ec_keybits / 8; csp.csp_auth_mlen = 12; csp.csp_auth_key = ks->ks_key; crypto_newsession(&as->as_session_sha1, &csp, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE); memset(&csp, 0, sizeof(csp)); csp.csp_mode = CSP_MODE_CIPHER; csp.csp_cipher_alg = CRYPTO_AES_CBC; csp.csp_cipher_klen = ks->ks_class->ec_keybits / 8; csp.csp_cipher_key = ks->ks_key; csp.csp_ivlen = 16; crypto_newsession(&as->as_session_aes, &csp, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE); } static void aes_random_to_key(struct krb5_key_state *ks, const void *in) { aes_set_key(ks, in); } static int aes_crypto_cb(struct cryptop *crp) { int error; struct aes_state *as = (struct aes_state *) crp->crp_opaque; - if (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC) + if (CRYPTO_SESS_SYNC(crp->crp_session)) return (0); error = crp->crp_etype; if (error == EAGAIN) error = crypto_dispatch(crp); mtx_lock(&as->as_lock); if (error || (crp->crp_flags & CRYPTO_F_DONE)) wakeup(crp); mtx_unlock(&as->as_lock); return (0); } static void aes_encrypt_1(const struct krb5_key_state *ks, int buftype, void *buf, size_t skip, size_t len, void *ivec, bool encrypt) { struct aes_state *as = ks->ks_priv; struct cryptop *crp; int error; crp = crypto_getreq(as->as_session_aes, M_WAITOK); crp->crp_payload_start = skip; crp->crp_payload_length = len; crp->crp_op = encrypt ? 
CRYPTO_OP_ENCRYPT : CRYPTO_OP_DECRYPT; crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_IV_SEPARATE; if (ivec) { memcpy(crp->crp_iv, ivec, 16); } else { memset(crp->crp_iv, 0, 16); } if (buftype == CRYPTO_BUF_MBUF) crypto_use_mbuf(crp, buf); else crypto_use_buf(crp, buf, skip + len); crp->crp_opaque = as; crp->crp_callback = aes_crypto_cb; error = crypto_dispatch(crp); - if ((crypto_ses2caps(as->as_session_aes) & CRYPTOCAP_F_SYNC) == 0) { + if (!CRYPTO_SESS_SYNC(as->as_session_aes)) { mtx_lock(&as->as_lock); if (!error && !(crp->crp_flags & CRYPTO_F_DONE)) error = msleep(crp, &as->as_lock, 0, "gssaes", 0); mtx_unlock(&as->as_lock); } crypto_freereq(crp); } static void aes_encrypt(const struct krb5_key_state *ks, struct mbuf *inout, size_t skip, size_t len, void *ivec, size_t ivlen) { size_t blocklen = 16, plen; struct { uint8_t cn_1[16], cn[16]; } last2; int i, off; /* * AES encryption with cyphertext stealing: * * CTSencrypt(P[0], ..., P[n], IV, K): * len = length(P[n]) * (C[0], ..., C[n-2], E[n-1]) = * CBCencrypt(P[0], ..., P[n-1], IV, K) * P = pad(P[n], 0, blocksize) * E[n] = CBCencrypt(P, E[n-1], K); * C[n-1] = E[n] * C[n] = E[n-1]{0..len-1} */ plen = len % blocklen; if (len == blocklen) { /* * Note: caller will ensure len >= blocklen. */ aes_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, skip, len, ivec, true); } else if (plen == 0) { /* * This is equivalent to CBC mode followed by swapping * the last two blocks. We assume that neither of the * last two blocks cross iov boundaries. */ aes_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, skip, len, ivec, true); off = skip + len - 2 * blocklen; m_copydata(inout, off, 2 * blocklen, (void*) &last2); m_copyback(inout, off, blocklen, last2.cn); m_copyback(inout, off + blocklen, blocklen, last2.cn_1); } else { /* * This is the difficult case. We encrypt all but the * last partial block first. We then create a padded * copy of the last block and encrypt that using the * second to last encrypted block as IV. Once we have * the encrypted versions of the last two blocks, we * reshuffle to create the final result. */ aes_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, skip, len - plen, ivec, true); /* * Copy out the last two blocks, pad the last block * and encrypt it. Rearrange to get the final * result. The cyphertext for cn_1 is in cn. The * cyphertext for cn is the first plen bytes of what * is in cn_1 now. */ off = skip + len - blocklen - plen; m_copydata(inout, off, blocklen + plen, (void*) &last2); for (i = plen; i < blocklen; i++) last2.cn[i] = 0; aes_encrypt_1(ks, CRYPTO_BUF_CONTIG, last2.cn, 0, blocklen, last2.cn_1, true); m_copyback(inout, off, blocklen, last2.cn); m_copyback(inout, off + blocklen, plen, last2.cn_1); } } static void aes_decrypt(const struct krb5_key_state *ks, struct mbuf *inout, size_t skip, size_t len, void *ivec, size_t ivlen) { size_t blocklen = 16, plen; struct { uint8_t cn_1[16], cn[16]; } last2; int i, off, t; /* * AES decryption with cyphertext stealing: * * CTSencrypt(C[0], ..., C[n], IV, K): * len = length(C[n]) * E[n] = C[n-1] * X = decrypt(E[n], K) * P[n] = (X ^ C[n]){0..len-1} * E[n-1] = {C[n,0],...,C[n,len-1],X[len],...,X[blocksize-1]} * (P[0],...,P[n-1]) = CBCdecrypt(C[0],...,C[n-2],E[n-1], IV, K) */ plen = len % blocklen; if (len == blocklen) { /* * Note: caller will ensure len >= blocklen. */ aes_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, skip, len, ivec, false); } else if (plen == 0) { /* * This is equivalent to CBC mode followed by swapping * the last two blocks. 
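 * For decryption the swap is undone first; a plain CBC decryption of the
 * result then recovers the original plaintext.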
*/ off = skip + len - 2 * blocklen; m_copydata(inout, off, 2 * blocklen, (void*) &last2); m_copyback(inout, off, blocklen, last2.cn); m_copyback(inout, off + blocklen, blocklen, last2.cn_1); aes_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, skip, len, ivec, false); } else { /* * This is the difficult case. We first decrypt the * second to last block with a zero IV to make X. The * plaintext for the last block is the XOR of X and * the last cyphertext block. * * We derive a new cypher text for the second to last * block by mixing the unused bytes of X with the last * cyphertext block. The result of that can be * decrypted with the rest in CBC mode. */ off = skip + len - plen - blocklen; aes_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, off, blocklen, NULL, false); m_copydata(inout, off, blocklen + plen, (void*) &last2); for (i = 0; i < plen; i++) { t = last2.cn[i]; last2.cn[i] ^= last2.cn_1[i]; last2.cn_1[i] = t; } m_copyback(inout, off, blocklen + plen, (void*) &last2); aes_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, skip, len - plen, ivec, false); } } static void aes_checksum(const struct krb5_key_state *ks, int usage, struct mbuf *inout, size_t skip, size_t inlen, size_t outlen) { struct aes_state *as = ks->ks_priv; struct cryptop *crp; int error; crp = crypto_getreq(as->as_session_sha1, M_WAITOK); crp->crp_payload_start = skip; crp->crp_payload_length = inlen; crp->crp_digest_start = skip + inlen; crp->crp_flags = CRYPTO_F_CBIFSYNC; crypto_use_mbuf(crp, inout); crp->crp_opaque = as; crp->crp_callback = aes_crypto_cb; error = crypto_dispatch(crp); - if ((crypto_ses2caps(as->as_session_sha1) & CRYPTOCAP_F_SYNC) == 0) { + if (!CRYPTO_SESS_SYNC(as->as_session_sha1)) { mtx_lock(&as->as_lock); if (!error && !(crp->crp_flags & CRYPTO_F_DONE)) error = msleep(crp, &as->as_lock, 0, "gssaes", 0); mtx_unlock(&as->as_lock); } crypto_freereq(crp); } struct krb5_encryption_class krb5_aes128_encryption_class = { "aes128-cts-hmac-sha1-96", /* name */ ETYPE_AES128_CTS_HMAC_SHA1_96, /* etype */ EC_DERIVED_KEYS, /* flags */ 16, /* blocklen */ 1, /* msgblocklen */ 12, /* checksumlen */ 128, /* keybits */ 16, /* keylen */ aes_init, aes_destroy, aes_set_key, aes_random_to_key, aes_encrypt, aes_decrypt, aes_checksum }; struct krb5_encryption_class krb5_aes256_encryption_class = { "aes256-cts-hmac-sha1-96", /* name */ ETYPE_AES256_CTS_HMAC_SHA1_96, /* etype */ EC_DERIVED_KEYS, /* flags */ 16, /* blocklen */ 1, /* msgblocklen */ 12, /* checksumlen */ 256, /* keybits */ 32, /* keylen */ aes_init, aes_destroy, aes_set_key, aes_random_to_key, aes_encrypt, aes_decrypt, aes_checksum }; diff --git a/sys/netipsec/xform_ah.c b/sys/netipsec/xform_ah.c index 5163bda86931..774f11a16c44 100644 --- a/sys/netipsec/xform_ah.c +++ b/sys/netipsec/xform_ah.c @@ -1,1164 +1,1166 @@ /* $FreeBSD$ */ /* $OpenBSD: ip_ah.c,v 1.63 2001/06/26 06:18:58 angelos Exp $ */ /*- * The authors of this code are John Ioannidis (ji@tla.org), * Angelos D. Keromytis (kermit@csd.uch.gr) and * Niels Provos (provos@physnet.uni-hamburg.de). * * The original version of this code was written by John Ioannidis * for BSD/OS in Athens, Greece, in November 1995. * * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996, * by Angelos D. Keromytis. * * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis * and Niels Provos. * * Additional features in 1999 by Angelos D. Keromytis and Niklas Hallqvist. * * Copyright (c) 1995, 1996, 1997, 1998, 1999 by John Ioannidis, * Angelos D. Keromytis and Niels Provos. 
* Copyright (c) 1999 Niklas Hallqvist. * Copyright (c) 2001 Angelos D. Keromytis. * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all copies of any software which is or includes a copy or * modification of this software. * You may use this code under the GNU public license if you so wish. Please * contribute changes back to the authors under this freer than GPL license * so that we may further the use of strong encryption without limitations to * all. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipsec.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #include #include #endif #include #include #include /* * Return header size in bytes. The old protocol did not support * the replay counter; the new protocol always includes the counter. */ #define HDRSIZE(sav) \ (((sav)->flags & SADB_X_EXT_OLD) ? \ sizeof (struct ah) : sizeof (struct ah) + sizeof (u_int32_t)) /* * Return authenticator size in bytes, based on a field in the * algorithm descriptor. */ #define AUTHSIZE(sav) ((sav->flags & SADB_X_EXT_OLD) ? 16 : \ xform_ah_authsize((sav)->tdb_authalgxform)) VNET_DEFINE(int, ah_enable) = 1; /* control flow of packets with AH */ VNET_DEFINE(int, ah_cleartos) = 1; /* clear ip_tos when doing AH calc */ VNET_PCPUSTAT_DEFINE(struct ahstat, ahstat); VNET_PCPUSTAT_SYSINIT(ahstat); #ifdef VIMAGE VNET_PCPUSTAT_SYSUNINIT(ahstat); #endif /* VIMAGE */ #ifdef INET SYSCTL_DECL(_net_inet_ah); SYSCTL_INT(_net_inet_ah, OID_AUTO, ah_enable, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ah_enable), 0, ""); SYSCTL_INT(_net_inet_ah, OID_AUTO, ah_cleartos, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ah_cleartos), 0, ""); SYSCTL_VNET_PCPUSTAT(_net_inet_ah, IPSECCTL_STATS, stats, struct ahstat, ahstat, "AH statistics (struct ahstat, netipsec/ah_var.h)"); #endif static unsigned char ipseczeroes[256]; /* larger than an ip6 extension hdr */ static int ah_input_cb(struct cryptop*); static int ah_output_cb(struct cryptop*); int xform_ah_authsize(const struct auth_hash *esph) { int alen; if (esph == NULL) return 0; switch (esph->type) { case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: alen = esph->hashsize / 2; /* RFC4868 2.3 */ break; case CRYPTO_AES_NIST_GMAC: alen = esph->hashsize; break; default: alen = AH_HMAC_HASHLEN; break; } return alen; } size_t ah_hdrsiz(struct secasvar *sav) { size_t size; if (sav != NULL) { int authsize, rplen, align; IPSEC_ASSERT(sav->tdb_authalgxform != NULL, ("null xform")); /*XXX not right for null algorithm--does it matter??*/ /* RFC4302: use the correct alignment. */ align = sizeof(uint32_t); #ifdef INET6 if (sav->sah->saidx.dst.sa.sa_family == AF_INET6) { align = sizeof(uint64_t); } #endif rplen = HDRSIZE(sav); authsize = AUTHSIZE(sav); size = roundup(rplen + authsize, align); } else { /* default guess */ size = sizeof (struct ah) + sizeof (u_int32_t) + 16; } return size; } /* * NB: public for use by esp_init. 
*/ int ah_init0(struct secasvar *sav, struct xformsw *xsp, struct crypto_session_params *csp) { const struct auth_hash *thash; int keylen; thash = auth_algorithm_lookup(sav->alg_auth); if (thash == NULL) { DPRINTF(("%s: unsupported authentication algorithm %u\n", __func__, sav->alg_auth)); return EINVAL; } /* * Verify the replay state block allocation is consistent with * the protocol type. We check here so we can make assumptions * later during protocol processing. */ /* NB: replay state is setup elsewhere (sigh) */ if (((sav->flags&SADB_X_EXT_OLD) == 0) ^ (sav->replay != NULL)) { DPRINTF(("%s: replay state block inconsistency, " "%s algorithm %s replay state\n", __func__, (sav->flags & SADB_X_EXT_OLD) ? "old" : "new", sav->replay == NULL ? "without" : "with")); return EINVAL; } if (sav->key_auth == NULL) { DPRINTF(("%s: no authentication key for %s algorithm\n", __func__, thash->name)); return EINVAL; } keylen = _KEYLEN(sav->key_auth); if (keylen > thash->keysize && thash->keysize != 0) { DPRINTF(("%s: invalid keylength %d, algorithm %s requires " "keysize less than %d\n", __func__, keylen, thash->name, thash->keysize)); return EINVAL; } sav->tdb_xform = xsp; sav->tdb_authalgxform = thash; /* Initialize crypto session. */ csp->csp_auth_alg = sav->tdb_authalgxform->type; if (csp->csp_auth_alg != CRYPTO_NULL_HMAC) { csp->csp_auth_klen = _KEYBITS(sav->key_auth) / 8; csp->csp_auth_key = sav->key_auth->key_data; }; csp->csp_auth_mlen = AUTHSIZE(sav); return 0; } /* * ah_init() is called when an SPI is being set up. */ static int ah_init(struct secasvar *sav, struct xformsw *xsp) { struct crypto_session_params csp; int error; memset(&csp, 0, sizeof(csp)); csp.csp_mode = CSP_MODE_DIGEST; if (sav->flags & SADB_X_SAFLAGS_ESN) csp.csp_flags |= CSP_F_ESN; error = ah_init0(sav, xsp, &csp); return error ? error : crypto_newsession(&sav->tdb_cryptoid, &csp, V_crypto_support); } static void ah_cleanup(struct secasvar *sav) { crypto_freesession(sav->tdb_cryptoid); sav->tdb_cryptoid = NULL; sav->tdb_authalgxform = NULL; } /* * Massage IPv4/IPv6 headers for AH processing. */ static int ah_massage_headers(struct mbuf **m0, int proto, int skip, int alg, int out) { struct mbuf *m = *m0; unsigned char *ptr; int off, count; #ifdef INET struct ip *ip; #endif /* INET */ #ifdef INET6 struct ip6_ext *ip6e; struct ip6_hdr ip6; int ad, alloc, nxt, noff; #endif /* INET6 */ switch (proto) { #ifdef INET case AF_INET: /* * This is the least painful way of dealing with IPv4 header * and option processing -- just make sure they're in * contiguous memory. */ *m0 = m = m_pullup(m, skip); if (m == NULL) { DPRINTF(("%s: m_pullup failed\n", __func__)); return ENOBUFS; } /* Fix the IP header */ ip = mtod(m, struct ip *); if (V_ah_cleartos) ip->ip_tos = 0; ip->ip_ttl = 0; ip->ip_sum = 0; ip->ip_off = htons(0); ptr = mtod(m, unsigned char *); /* IPv4 option processing */ for (off = sizeof(struct ip); off < skip;) { if (ptr[off] == IPOPT_EOL || ptr[off] == IPOPT_NOP || off + 1 < skip) ; else { DPRINTF(("%s: illegal IPv4 option length for " "option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } switch (ptr[off]) { case IPOPT_EOL: off = skip; /* End the loop. */ break; case IPOPT_NOP: off++; break; case IPOPT_SECURITY: /* 0x82 */ case 0x85: /* Extended security. */ case 0x86: /* Commercial security. */ case 0x94: /* Router alert */ case 0x95: /* RFC1770 */ /* Sanity check for option length. 
*/ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } off += ptr[off + 1]; break; case IPOPT_LSRR: case IPOPT_SSRR: /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } /* * On output, if we have either of the * source routing options, we should * swap the destination address of the * IP header with the last address * specified in the option, as that is * what the destination's IP header * will look like. */ if (out) bcopy(ptr + off + ptr[off + 1] - sizeof(struct in_addr), &(ip->ip_dst), sizeof(struct in_addr)); /* Fall through */ default: /* Sanity check for option length. */ if (ptr[off + 1] < 2) { DPRINTF(("%s: illegal IPv4 option " "length for option %d\n", __func__, ptr[off])); m_freem(m); return EINVAL; } /* Zeroize all other options. */ count = ptr[off + 1]; bcopy(ipseczeroes, ptr + off, count); off += count; break; } /* Sanity check. */ if (off > skip) { DPRINTF(("%s: malformed IPv4 options header\n", __func__)); m_freem(m); return EINVAL; } } break; #endif /* INET */ #ifdef INET6 case AF_INET6: /* Ugly... */ /* Copy and "cook" the IPv6 header. */ m_copydata(m, 0, sizeof(ip6), (caddr_t) &ip6); /* We don't do IPv6 Jumbograms. */ if (ip6.ip6_plen == 0) { DPRINTF(("%s: unsupported IPv6 jumbogram\n", __func__)); m_freem(m); return EMSGSIZE; } ip6.ip6_flow = 0; ip6.ip6_hlim = 0; ip6.ip6_vfc &= ~IPV6_VERSION_MASK; ip6.ip6_vfc |= IPV6_VERSION; /* Scoped address handling. */ if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_src)) ip6.ip6_src.s6_addr16[1] = 0; if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_dst)) ip6.ip6_dst.s6_addr16[1] = 0; /* Done with IPv6 header. */ m_copyback(m, 0, sizeof(struct ip6_hdr), (caddr_t) &ip6); /* Let's deal with the remaining headers (if any). */ if (skip - sizeof(struct ip6_hdr) > 0) { if (m->m_len <= skip) { ptr = (unsigned char *) malloc( skip - sizeof(struct ip6_hdr), M_XDATA, M_NOWAIT); if (ptr == NULL) { DPRINTF(("%s: failed to allocate memory" "for IPv6 headers\n",__func__)); m_freem(m); return ENOBUFS; } /* * Copy all the protocol headers after * the IPv6 header. */ m_copydata(m, sizeof(struct ip6_hdr), skip - sizeof(struct ip6_hdr), ptr); alloc = 1; } else { /* No need to allocate memory. */ ptr = mtod(m, unsigned char *) + sizeof(struct ip6_hdr); alloc = 0; } } else break; nxt = ip6.ip6_nxt & 0xff; /* Next header type. */ for (off = 0; off < skip - sizeof(struct ip6_hdr);) switch (nxt) { case IPPROTO_HOPOPTS: case IPPROTO_DSTOPTS: ip6e = (struct ip6_ext *)(ptr + off); noff = off + ((ip6e->ip6e_len + 1) << 3); /* Sanity check. */ if (noff > skip - sizeof(struct ip6_hdr)) goto error6; /* * Zero out mutable options. */ for (count = off + sizeof(struct ip6_ext); count < noff;) { if (ptr[count] == IP6OPT_PAD1) { count++; continue; /* Skip padding. */ } ad = ptr[count + 1] + 2; if (count + ad > noff) goto error6; if (ptr[count] & IP6OPT_MUTABLE) memset(ptr + count, 0, ad); count += ad; } if (count != noff) goto error6; /* Advance. */ off += ((ip6e->ip6e_len + 1) << 3); nxt = ip6e->ip6e_nxt; break; case IPPROTO_ROUTING: /* * Always include routing headers in * computation. */ ip6e = (struct ip6_ext *) (ptr + off); off += ((ip6e->ip6e_len + 1) << 3); nxt = ip6e->ip6e_nxt; break; default: DPRINTF(("%s: unexpected IPv6 header type %d", __func__, off)); error6: if (alloc) free(ptr, M_XDATA); m_freem(m); return EINVAL; } /* Copyback and free, if we allocated. 
*/ if (alloc) { m_copyback(m, sizeof(struct ip6_hdr), skip - sizeof(struct ip6_hdr), ptr); free(ptr, M_XDATA); } break; #endif /* INET6 */ } return 0; } /* * ah_input() gets called to verify that an input packet * passes authentication. */ static int ah_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff) { IPSEC_DEBUG_DECLARE(char buf[128]); const struct auth_hash *ahx; struct cryptop *crp; struct xform_data *xd; struct newah *ah; crypto_session_t cryptoid; int hl, rplen, authsize, ahsize, error; uint32_t seqh; IPSEC_ASSERT(sav != NULL, ("null SA")); IPSEC_ASSERT(sav->key_auth != NULL, ("null authentication key")); IPSEC_ASSERT(sav->tdb_authalgxform != NULL, ("null authentication xform")); /* Figure out header size. */ rplen = HDRSIZE(sav); if (m->m_len < skip + rplen) { m = m_pullup(m, skip + rplen); if (m == NULL) { DPRINTF(("ah_input: cannot pullup header\n")); AHSTAT_INC(ahs_hdrops); /*XXX*/ error = ENOBUFS; goto bad; } } ah = (struct newah *)(mtod(m, caddr_t) + skip); /* Check replay window, if applicable. */ SECASVAR_LOCK(sav); if (sav->replay != NULL && sav->replay->wsize != 0 && ipsec_chkreplay(ntohl(ah->ah_seq), &seqh, sav) == 0) { SECASVAR_UNLOCK(sav); AHSTAT_INC(ahs_replay); DPRINTF(("%s: packet replay failure: %s\n", __func__, ipsec_sa2str(sav, buf, sizeof(buf)))); error = EACCES; goto bad; } cryptoid = sav->tdb_cryptoid; SECASVAR_UNLOCK(sav); /* Verify AH header length. */ hl = sizeof(struct ah) + (ah->ah_len * sizeof (u_int32_t)); ahx = sav->tdb_authalgxform; authsize = AUTHSIZE(sav); ahsize = ah_hdrsiz(sav); if (hl != ahsize) { DPRINTF(("%s: bad authenticator length %u (expecting %lu)" " for packet in SA %s/%08lx\n", __func__, hl, (u_long)ahsize, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_badauthl); error = EACCES; goto bad; } if (skip + ahsize > m->m_pkthdr.len) { DPRINTF(("%s: bad mbuf length %u (expecting %lu)" " for packet in SA %s/%08lx\n", __func__, m->m_pkthdr.len, (u_long)(skip + ahsize), ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_badauthl); error = EACCES; goto bad; } AHSTAT_ADD(ahs_ibytes, m->m_pkthdr.len - skip - hl); /* Get crypto descriptors. */ crp = crypto_getreq(cryptoid, M_NOWAIT); if (crp == NULL) { DPRINTF(("%s: failed to acquire crypto descriptor\n", __func__)); AHSTAT_INC(ahs_crypto); error = ENOBUFS; goto bad; } crp->crp_payload_start = 0; crp->crp_payload_length = m->m_pkthdr.len; crp->crp_digest_start = skip + rplen; /* Allocate IPsec-specific opaque crypto info. */ xd = malloc(sizeof(*xd) + skip + rplen + authsize, M_XDATA, M_NOWAIT | M_ZERO); if (xd == NULL) { DPRINTF(("%s: failed to allocate xform_data\n", __func__)); AHSTAT_INC(ahs_crypto); crypto_freereq(crp); error = ENOBUFS; goto bad; } /* * Save the authenticator, the skipped portion of the packet, * and the AH header. */ m_copydata(m, 0, skip + rplen + authsize, (caddr_t)(xd + 1)); /* Zeroize the authenticator on the packet. */ m_copyback(m, skip + rplen, authsize, ipseczeroes); /* Save ah_nxt, since ah pointer can become invalid after "massage" */ hl = ah->ah_nxt; /* "Massage" the packet headers for crypto processing. */ error = ah_massage_headers(&m, sav->sah->saidx.dst.sa.sa_family, skip, ahx->type, 0); if (error != 0) { /* NB: mbuf is free'd by ah_massage_headers */ AHSTAT_INC(ahs_hdrops); free(xd, M_XDATA); crypto_freereq(crp); key_freesav(&sav); return (error); } /* Crypto operation descriptor. 
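Condensed to its essentials, the digest request that ah_input() assembles here follows the crypto_request(9) pattern: the payload region covers the whole packet, and the ICV sits immediately after the AH header at skip + rplen. A self-contained sketch (hypothetical helper; kernel headers assumed, error handling trimmed):

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <opencrypto/cryptodev.h>

static int
ah_digest_request(crypto_session_t cses, struct mbuf *m, int len,
    int skip, int rplen, int (*cb)(struct cryptop *), void *arg)
{
	struct cryptop *crp;

	crp = crypto_getreq(cses, M_NOWAIT);
	if (crp == NULL)
		return (ENOBUFS);
	crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_payload_start = 0;		/* ICV covers the whole packet */
	crp->crp_payload_length = len;
	crp->crp_digest_start = skip + rplen;	/* ICV right after the AH header */
	crypto_use_mbuf(crp, m);
	crp->crp_callback = cb;
	crp->crp_opaque = arg;
	return (crypto_dispatch(crp));
}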
*/ crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST; crp->crp_flags = CRYPTO_F_CBIFSYNC; - if (V_async_crypto) - crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; crypto_use_mbuf(crp, m); crp->crp_callback = ah_input_cb; crp->crp_opaque = xd; if (sav->flags & SADB_X_SAFLAGS_ESN && sav->replay != NULL && sav->replay->wsize != 0) { seqh = htonl(seqh); memcpy(crp->crp_esn, &seqh, sizeof(seqh)); } /* These are passed as-is to the callback. */ xd->sav = sav; xd->nxt = hl; xd->protoff = protoff; xd->skip = skip; xd->cryptoid = cryptoid; xd->vnet = curvnet; - return (crypto_dispatch(crp)); + if (V_async_crypto) + return (crypto_dispatch_async(crp, CRYPTO_ASYNC_ORDERED)); + else + return (crypto_dispatch(crp)); bad: m_freem(m); key_freesav(&sav); return (error); } /* * AH input callback from the crypto driver. */ static int ah_input_cb(struct cryptop *crp) { IPSEC_DEBUG_DECLARE(char buf[IPSEC_ADDRSTRLEN]); unsigned char calc[AH_ALEN_MAX]; struct mbuf *m; struct xform_data *xd; struct secasvar *sav; struct secasindex *saidx; caddr_t ptr; crypto_session_t cryptoid; int authsize, rplen, ahsize, error, skip, protoff; uint8_t nxt; m = crp->crp_buf.cb_mbuf; xd = crp->crp_opaque; CURVNET_SET(xd->vnet); sav = xd->sav; skip = xd->skip; nxt = xd->nxt; protoff = xd->protoff; cryptoid = xd->cryptoid; saidx = &sav->sah->saidx; IPSEC_ASSERT(saidx->dst.sa.sa_family == AF_INET || saidx->dst.sa.sa_family == AF_INET6, ("unexpected protocol family %u", saidx->dst.sa.sa_family)); /* Check for crypto errors. */ if (crp->crp_etype) { if (crp->crp_etype == EAGAIN) { /* Reset the session ID */ if (ipsec_updateid(sav, &crp->crp_session, &cryptoid) != 0) crypto_freesession(cryptoid); xd->cryptoid = crp->crp_session; CURVNET_RESTORE(); return (crypto_dispatch(crp)); } AHSTAT_INC(ahs_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; goto bad; } else { AHSTAT_INC(ahs_hist[sav->alg_auth]); crypto_freereq(crp); /* No longer needed. */ crp = NULL; } /* Shouldn't happen... */ if (m == NULL) { AHSTAT_INC(ahs_crypto); DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } /* Figure out header size. */ rplen = HDRSIZE(sav); authsize = AUTHSIZE(sav); ahsize = ah_hdrsiz(sav); /* Copy authenticator off the packet. */ m_copydata(m, skip + rplen, authsize, calc); /* Verify authenticator. */ ptr = (caddr_t) (xd + 1); if (timingsafe_bcmp(ptr + skip + rplen, calc, authsize)) { DPRINTF(("%s: authentication hash mismatch for packet " "in SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_badauth); error = EACCES; goto bad; } /* Fix the Next Protocol field. */ ((uint8_t *) ptr)[protoff] = nxt; /* Copyback the saved (uncooked) network headers. */ m_copyback(m, 0, skip, ptr); free(xd, M_XDATA), xd = NULL; /* No longer needed */ /* * Header is now authenticated. */ m->m_flags |= M_AUTHIPHDR|M_AUTHIPDGM; /* * Update replay sequence number, if appropriate. */ if (sav->replay) { u_int32_t seq; m_copydata(m, skip + offsetof(struct newah, ah_seq), sizeof (seq), (caddr_t) &seq); SECASVAR_LOCK(sav); if (ipsec_updatereplay(ntohl(seq), sav)) { SECASVAR_UNLOCK(sav); AHSTAT_INC(ahs_replay); error = EACCES; goto bad; } SECASVAR_UNLOCK(sav); } /* * Remove the AH header and authenticator from the mbuf. 
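The hunk above is the core of this change for AH input: rather than tagging the request with the old CRYPTO_F_ASYNC / CRYPTO_F_ASYNC_KEEPORDER flags, the caller now picks the dispatch function explicitly, and CRYPTO_ASYNC_ORDERED preserves per-session completion ordering across the async taskqueue. The same idiom recurs in ah_output(), esp_input(), and esp_output(); as a sketch (the wrapper name is hypothetical, the body mirrors the patch):

static int
ipsec_crypto_dispatch(struct cryptop *crp)
{
	if (V_async_crypto)
		return (crypto_dispatch_async(crp, CRYPTO_ASYNC_ORDERED));
	else
		return (crypto_dispatch(crp));
}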
*/ error = m_striphdr(m, skip, ahsize); if (error) { DPRINTF(("%s: mangled mbuf chain for SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_hdrops); goto bad; } switch (saidx->dst.sa.sa_family) { #ifdef INET6 case AF_INET6: error = ipsec6_common_input_cb(m, sav, skip, protoff); break; #endif #ifdef INET case AF_INET: error = ipsec4_common_input_cb(m, sav, skip, protoff); break; #endif default: panic("%s: Unexpected address family: %d saidx=%p", __func__, saidx->dst.sa.sa_family, saidx); } CURVNET_RESTORE(); return error; bad: CURVNET_RESTORE(); if (sav) key_freesav(&sav); if (m != NULL) m_freem(m); if (xd != NULL) free(xd, M_XDATA); if (crp != NULL) crypto_freereq(crp); return error; } /* * AH output routine, called by ipsec[46]_perform_request(). */ static int ah_output(struct mbuf *m, struct secpolicy *sp, struct secasvar *sav, u_int idx, int skip, int protoff) { IPSEC_DEBUG_DECLARE(char buf[IPSEC_ADDRSTRLEN]); const struct auth_hash *ahx; struct xform_data *xd; struct mbuf *mi; struct cryptop *crp; struct newah *ah; crypto_session_t cryptoid; uint16_t iplen; int error, rplen, authsize, ahsize, maxpacketsize, roff; uint8_t prot; uint32_t seqh; IPSEC_ASSERT(sav != NULL, ("null SA")); ahx = sav->tdb_authalgxform; IPSEC_ASSERT(ahx != NULL, ("null authentication xform")); AHSTAT_INC(ahs_output); /* Figure out header size. */ rplen = HDRSIZE(sav); authsize = AUTHSIZE(sav); ahsize = ah_hdrsiz(sav); /* Check for maximum packet size violations. */ switch (sav->sah->saidx.dst.sa.sa_family) { #ifdef INET case AF_INET: maxpacketsize = IP_MAXPACKET; break; #endif /* INET */ #ifdef INET6 case AF_INET6: maxpacketsize = IPV6_MAXPACKET; break; #endif /* INET6 */ default: DPRINTF(("%s: unknown/unsupported protocol family %u, " "SA %s/%08lx\n", __func__, sav->sah->saidx.dst.sa.sa_family, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_nopf); error = EPFNOSUPPORT; goto bad; } if (ahsize + m->m_pkthdr.len > maxpacketsize) { DPRINTF(("%s: packet in SA %s/%08lx got too big " "(len %u, max len %u)\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi), ahsize + m->m_pkthdr.len, maxpacketsize)); AHSTAT_INC(ahs_toobig); error = EMSGSIZE; goto bad; } /* Update the counters. */ AHSTAT_ADD(ahs_obytes, m->m_pkthdr.len - skip); m = m_unshare(m, M_NOWAIT); if (m == NULL) { DPRINTF(("%s: cannot clone mbuf chain, SA %s/%08lx\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_hdrops); error = ENOBUFS; goto bad; } /* Inject AH header. */ mi = m_makespace(m, skip, ahsize, &roff); if (mi == NULL) { DPRINTF(("%s: failed to inject %u byte AH header for SA " "%s/%08lx\n", __func__, ahsize, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_hdrops); /*XXX differs from openbsd */ error = ENOBUFS; goto bad; } /* * The AH header is guaranteed by m_makespace() to be in * contiguous memory, at roff bytes offset into the returned mbuf. */ ah = (struct newah *)(mtod(mi, caddr_t) + roff); /* Initialize the AH header. */ m_copydata(m, protoff, sizeof(u_int8_t), (caddr_t) &ah->ah_nxt); ah->ah_len = (ahsize - sizeof(struct ah)) / sizeof(u_int32_t); ah->ah_reserve = 0; ah->ah_spi = sav->spi; /* Zeroize authenticator. 
*/ m_copyback(m, skip + rplen, authsize, ipseczeroes); /* Zeroize padding */ m_copyback(m, skip + rplen + authsize, ahsize - (rplen + authsize), ipseczeroes); /* Insert packet replay counter, as requested. */ SECASVAR_LOCK(sav); if (sav->replay) { if ((sav->replay->count == ~0 || (!(sav->flags & SADB_X_SAFLAGS_ESN) && ((uint32_t)sav->replay->count) == ~0)) && (sav->flags & SADB_X_EXT_CYCSEQ) == 0) { SECASVAR_UNLOCK(sav); DPRINTF(("%s: replay counter wrapped for SA %s/%08lx\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); AHSTAT_INC(ahs_wrap); error = EACCES; goto bad; } #ifdef REGRESSION /* Emulate replay attack when ipsec_replay is TRUE. */ if (!V_ipsec_replay) #endif sav->replay->count++; ah->ah_seq = htonl((uint32_t)sav->replay->count); } cryptoid = sav->tdb_cryptoid; SECASVAR_UNLOCK(sav); /* Get crypto descriptors. */ crp = crypto_getreq(cryptoid, M_NOWAIT); if (crp == NULL) { DPRINTF(("%s: failed to acquire crypto descriptors\n", __func__)); AHSTAT_INC(ahs_crypto); error = ENOBUFS; goto bad; } crp->crp_payload_start = 0; crp->crp_payload_length = m->m_pkthdr.len; crp->crp_digest_start = skip + rplen; /* Allocate IPsec-specific opaque crypto info. */ xd = malloc(sizeof(struct xform_data) + skip, M_XDATA, M_NOWAIT | M_ZERO); if (xd == NULL) { crypto_freereq(crp); DPRINTF(("%s: failed to allocate xform_data\n", __func__)); AHSTAT_INC(ahs_crypto); error = ENOBUFS; goto bad; } /* Save the skipped portion of the packet. */ m_copydata(m, 0, skip, (caddr_t) (xd + 1)); /* * Fix IP header length on the header used for * authentication. We don't need to fix the original * header length as it will be fixed by our caller. */ switch (sav->sah->saidx.dst.sa.sa_family) { #ifdef INET case AF_INET: bcopy(((caddr_t)(xd + 1)) + offsetof(struct ip, ip_len), (caddr_t) &iplen, sizeof(u_int16_t)); iplen = htons(ntohs(iplen) + ahsize); m_copyback(m, offsetof(struct ip, ip_len), sizeof(u_int16_t), (caddr_t) &iplen); break; #endif /* INET */ #ifdef INET6 case AF_INET6: bcopy(((caddr_t)(xd + 1)) + offsetof(struct ip6_hdr, ip6_plen), (caddr_t) &iplen, sizeof(uint16_t)); iplen = htons(ntohs(iplen) + ahsize); m_copyback(m, offsetof(struct ip6_hdr, ip6_plen), sizeof(uint16_t), (caddr_t) &iplen); break; #endif /* INET6 */ } /* Fix the Next Header field in saved header. */ ((uint8_t *) (xd + 1))[protoff] = IPPROTO_AH; /* Update the Next Protocol field in the IP header. */ prot = IPPROTO_AH; m_copyback(m, protoff, sizeof(uint8_t), (caddr_t) &prot); /* "Massage" the packet headers for crypto processing. */ error = ah_massage_headers(&m, sav->sah->saidx.dst.sa.sa_family, skip, ahx->type, 1); if (error != 0) { m = NULL; /* mbuf was free'd by ah_massage_headers. */ free(xd, M_XDATA); crypto_freereq(crp); goto bad; } /* Crypto operation descriptor. */ crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST; crp->crp_flags = CRYPTO_F_CBIFSYNC; - if (V_async_crypto) - crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; crypto_use_mbuf(crp, m); crp->crp_callback = ah_output_cb; crp->crp_opaque = xd; if (sav->flags & SADB_X_SAFLAGS_ESN && sav->replay != NULL) { seqh = htonl((uint32_t)(sav->replay->count >> IPSEC_SEQH_SHIFT)); memcpy(crp->crp_esn, &seqh, sizeof(seqh)); } /* These are passed as-is to the callback. 
*/ xd->sp = sp; xd->sav = sav; xd->skip = skip; xd->idx = idx; xd->cryptoid = cryptoid; xd->vnet = curvnet; - return crypto_dispatch(crp); + if (V_async_crypto) + return (crypto_dispatch_async(crp, CRYPTO_ASYNC_ORDERED)); + else + return (crypto_dispatch(crp)); bad: if (m) m_freem(m); key_freesav(&sav); key_freesp(&sp); return (error); } /* * AH output callback from the crypto driver. */ static int ah_output_cb(struct cryptop *crp) { struct xform_data *xd; struct secpolicy *sp; struct secasvar *sav; struct mbuf *m; crypto_session_t cryptoid; caddr_t ptr; u_int idx; int skip, error; m = crp->crp_buf.cb_mbuf; xd = (struct xform_data *) crp->crp_opaque; CURVNET_SET(xd->vnet); sp = xd->sp; sav = xd->sav; skip = xd->skip; idx = xd->idx; cryptoid = xd->cryptoid; ptr = (caddr_t) (xd + 1); /* Check for crypto errors. */ if (crp->crp_etype) { if (crp->crp_etype == EAGAIN) { /* Reset the session ID */ if (ipsec_updateid(sav, &crp->crp_session, &cryptoid) != 0) crypto_freesession(cryptoid); xd->cryptoid = crp->crp_session; CURVNET_RESTORE(); return (crypto_dispatch(crp)); } AHSTAT_INC(ahs_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; m_freem(m); goto bad; } /* Shouldn't happen... */ if (m == NULL) { AHSTAT_INC(ahs_crypto); DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } /* * Copy original headers (with the new protocol number) back * in place. */ m_copyback(m, 0, skip, ptr); free(xd, M_XDATA); crypto_freereq(crp); AHSTAT_INC(ahs_hist[sav->alg_auth]); #ifdef REGRESSION /* Emulate man-in-the-middle attack when ipsec_integrity is TRUE. */ if (V_ipsec_integrity) { int alen; /* * Corrupt HMAC if we want to test integrity verification of * the other side. */ alen = AUTHSIZE(sav); m_copyback(m, m->m_pkthdr.len - alen, alen, ipseczeroes); } #endif /* NB: m is reclaimed by ipsec_process_done. */ error = ipsec_process_done(m, sp, sav, idx); CURVNET_RESTORE(); return (error); bad: CURVNET_RESTORE(); free(xd, M_XDATA); crypto_freereq(crp); key_freesav(&sav); key_freesp(&sp); return (error); } static struct xformsw ah_xformsw = { .xf_type = XF_AH, .xf_name = "IPsec AH", .xf_init = ah_init, .xf_cleanup = ah_cleanup, .xf_input = ah_input, .xf_output = ah_output, }; SYSINIT(ah_xform_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, xform_attach, &ah_xformsw); SYSUNINIT(ah_xform_uninit, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, xform_detach, &ah_xformsw); diff --git a/sys/netipsec/xform_esp.c b/sys/netipsec/xform_esp.c index dc64dc732992..a7d5776e4da2 100644 --- a/sys/netipsec/xform_esp.c +++ b/sys/netipsec/xform_esp.c @@ -1,1062 +1,1064 @@ /* $FreeBSD$ */ /* $OpenBSD: ip_esp.c,v 1.69 2001/06/26 06:18:59 angelos Exp $ */ /*- * The authors of this code are John Ioannidis (ji@tla.org), * Angelos D. Keromytis (kermit@csd.uch.gr) and * Niels Provos (provos@physnet.uni-hamburg.de). * * The original version of this code was written by John Ioannidis * for BSD/OS in Athens, Greece, in November 1995. * * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996, * by Angelos D. Keromytis. * * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis * and Niels Provos. * * Additional features in 1999 by Angelos D. Keromytis. * * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis, * Angelos D. Keromytis and Niels Provos. * Copyright (c) 2001 Angelos D. Keromytis. 
* * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all copies of any software which is or includes a copy or * modification of this software. * You may use this code under the GNU public license if you so wish. Please * contribute changes back to the authors under this freer than GPL license * so that we may further the use of strong encryption without limitations to * all. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_ipsec.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET6 #include #include #include #endif #include #include #include #include #define SPI_SIZE 4 VNET_DEFINE(int, esp_enable) = 1; VNET_DEFINE_STATIC(int, esp_ctr_compatibility) = 1; #define V_esp_ctr_compatibility VNET(esp_ctr_compatibility) VNET_PCPUSTAT_DEFINE(struct espstat, espstat); VNET_PCPUSTAT_SYSINIT(espstat); #ifdef VIMAGE VNET_PCPUSTAT_SYSUNINIT(espstat); #endif /* VIMAGE */ SYSCTL_DECL(_net_inet_esp); SYSCTL_INT(_net_inet_esp, OID_AUTO, esp_enable, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(esp_enable), 0, ""); SYSCTL_INT(_net_inet_esp, OID_AUTO, ctr_compatibility, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(esp_ctr_compatibility), 0, "Align AES-CTR encrypted transmitted frames to blocksize"); SYSCTL_VNET_PCPUSTAT(_net_inet_esp, IPSECCTL_STATS, stats, struct espstat, espstat, "ESP statistics (struct espstat, netipsec/esp_var.h"); static int esp_input_cb(struct cryptop *op); static int esp_output_cb(struct cryptop *crp); size_t esp_hdrsiz(struct secasvar *sav) { size_t size; if (sav != NULL) { /*XXX not right for null algorithm--does it matter??*/ IPSEC_ASSERT(sav->tdb_encalgxform != NULL, ("SA with null xform")); if (sav->flags & SADB_X_EXT_OLD) size = sizeof (struct esp); else size = sizeof (struct newesp); size += sav->tdb_encalgxform->blocksize + 9; /*XXX need alg check???*/ if (sav->tdb_authalgxform != NULL && sav->replay) size += ah_hdrsiz(sav); } else { /* * base header size * + max iv length for CBC mode * + max pad length * + sizeof (pad length field) * + sizeof (next header field) * + max icv supported. */ size = sizeof (struct newesp) + EALG_MAX_BLOCK_LEN + 9 + 16; } return size; } /* * esp_init() is called when an SPI is being set up. 
*/ static int esp_init(struct secasvar *sav, struct xformsw *xsp) { const struct enc_xform *txform; struct crypto_session_params csp; int keylen; int error; txform = enc_algorithm_lookup(sav->alg_enc); if (txform == NULL) { DPRINTF(("%s: unsupported encryption algorithm %d\n", __func__, sav->alg_enc)); return EINVAL; } if (sav->key_enc == NULL) { DPRINTF(("%s: no encoding key for %s algorithm\n", __func__, txform->name)); return EINVAL; } if ((sav->flags & (SADB_X_EXT_OLD | SADB_X_EXT_IV4B)) == SADB_X_EXT_IV4B) { DPRINTF(("%s: 4-byte IV not supported with protocol\n", __func__)); return EINVAL; } /* subtract off the salt, RFC4106, 8.1 and RFC3686, 5.1 */ keylen = _KEYLEN(sav->key_enc) - SAV_ISCTRORGCM(sav) * 4; if (txform->minkey > keylen || keylen > txform->maxkey) { DPRINTF(("%s: invalid key length %u, must be in the range " "[%u..%u] for algorithm %s\n", __func__, keylen, txform->minkey, txform->maxkey, txform->name)); return EINVAL; } if (SAV_ISCTRORGCM(sav)) sav->ivlen = 8; /* RFC4106 3.1 and RFC3686 3.1 */ else sav->ivlen = txform->ivsize; memset(&csp, 0, sizeof(csp)); /* * Setup AH-related state. */ if (sav->alg_auth != 0) { error = ah_init0(sav, xsp, &csp); if (error) return error; } /* NB: override anything set in ah_init0 */ sav->tdb_xform = xsp; sav->tdb_encalgxform = txform; /* * Whenever AES-GCM is used for encryption, one * of the AES authentication algorithms is chosen * as well, based on the key size. */ if (sav->alg_enc == SADB_X_EALG_AESGCM16) { switch (keylen) { case AES_128_GMAC_KEY_LEN: sav->alg_auth = SADB_X_AALG_AES128GMAC; sav->tdb_authalgxform = &auth_hash_nist_gmac_aes_128; break; case AES_192_GMAC_KEY_LEN: sav->alg_auth = SADB_X_AALG_AES192GMAC; sav->tdb_authalgxform = &auth_hash_nist_gmac_aes_192; break; case AES_256_GMAC_KEY_LEN: sav->alg_auth = SADB_X_AALG_AES256GMAC; sav->tdb_authalgxform = &auth_hash_nist_gmac_aes_256; break; default: DPRINTF(("%s: invalid key length %u" "for algorithm %s\n", __func__, keylen, txform->name)); return EINVAL; } csp.csp_mode = CSP_MODE_AEAD; if (sav->flags & SADB_X_SAFLAGS_ESN) csp.csp_flags |= CSP_F_SEPARATE_AAD; } else if (sav->alg_auth != 0) { csp.csp_mode = CSP_MODE_ETA; if (sav->flags & SADB_X_SAFLAGS_ESN) csp.csp_flags |= CSP_F_ESN; } else csp.csp_mode = CSP_MODE_CIPHER; /* Initialize crypto session. */ csp.csp_cipher_alg = sav->tdb_encalgxform->type; if (csp.csp_cipher_alg != CRYPTO_NULL_CBC) { csp.csp_cipher_key = sav->key_enc->key_data; csp.csp_cipher_klen = _KEYBITS(sav->key_enc) / 8 - SAV_ISCTRORGCM(sav) * 4; }; csp.csp_ivlen = txform->ivsize; error = crypto_newsession(&sav->tdb_cryptoid, &csp, V_crypto_support); return error; } static void esp_cleanup(struct secasvar *sav) { crypto_freesession(sav->tdb_cryptoid); sav->tdb_cryptoid = NULL; sav->tdb_authalgxform = NULL; sav->tdb_encalgxform = NULL; } /* * ESP input processing, called (eventually) through the protocol switch. */ static int esp_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff) { IPSEC_DEBUG_DECLARE(char buf[128]); const struct auth_hash *esph; const struct enc_xform *espx; struct xform_data *xd; struct cryptop *crp; struct newesp *esp; uint8_t *ivp; crypto_session_t cryptoid; int alen, error, hlen, plen; uint32_t seqh; const struct crypto_session_params *csp; IPSEC_ASSERT(sav != NULL, ("null SA")); IPSEC_ASSERT(sav->tdb_encalgxform != NULL, ("null encoding xform")); error = EINVAL; /* Valid IP Packet length ? 
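One detail in esp_init() above worth a worked example: the 4-byte nonce/salt that RFC 4106 (GCM) and RFC 3686 (CTR) store at the tail of the key blob is subtracted before the cipher key length is validated. With hypothetical sizes for AES-128-CTR:

#include <assert.h>

int
main(void)
{
	int keyblob = 20;		/* _KEYLEN(sav->key_enc): 16 + 4 salt */
	int is_ctr_or_gcm = 1;		/* SAV_ISCTRORGCM(sav) */
	int keylen = keyblob - is_ctr_or_gcm * 4;

	assert(keylen == 16);		/* what the cipher actually receives */
	return (0);
}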
*/ if ( (skip&3) || (m->m_pkthdr.len&3) ){ DPRINTF(("%s: misaligned packet, skip %u pkt len %u", __func__, skip, m->m_pkthdr.len)); ESPSTAT_INC(esps_badilen); goto bad; } if (m->m_len < skip + sizeof(*esp)) { m = m_pullup(m, skip + sizeof(*esp)); if (m == NULL) { DPRINTF(("%s: cannot pullup header\n", __func__)); ESPSTAT_INC(esps_hdrops); /*XXX*/ error = ENOBUFS; goto bad; } } esp = (struct newesp *)(mtod(m, caddr_t) + skip); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; /* Determine the ESP header and auth length */ if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; alen = xform_ah_authsize(esph); /* * Verify payload length is multiple of encryption algorithm * block size. * * NB: This works for the null algorithm because the blocksize * is 4 and all packets must be 4-byte aligned regardless * of the algorithm. */ plen = m->m_pkthdr.len - (skip + hlen + alen); if ((plen & (espx->blocksize - 1)) || (plen <= 0)) { DPRINTF(("%s: payload of %d octets not a multiple of %d octets," " SA %s/%08lx\n", __func__, plen, espx->blocksize, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long)ntohl(sav->spi))); ESPSTAT_INC(esps_badilen); goto bad; } /* * Check sequence number. */ SECASVAR_LOCK(sav); if (esph != NULL && sav->replay != NULL && sav->replay->wsize != 0) { if (ipsec_chkreplay(ntohl(esp->esp_seq), &seqh, sav) == 0) { SECASVAR_UNLOCK(sav); DPRINTF(("%s: packet replay check for %s\n", __func__, ipsec_sa2str(sav, buf, sizeof(buf)))); ESPSTAT_INC(esps_replay); error = EACCES; goto bad; } seqh = htonl(seqh); } cryptoid = sav->tdb_cryptoid; SECASVAR_UNLOCK(sav); /* Update the counters */ ESPSTAT_ADD(esps_ibytes, m->m_pkthdr.len - (skip + hlen + alen)); /* Get crypto descriptors */ crp = crypto_getreq(cryptoid, M_NOWAIT); if (crp == NULL) { DPRINTF(("%s: failed to acquire crypto descriptors\n", __func__)); ESPSTAT_INC(esps_crypto); error = ENOBUFS; goto bad; } /* Get IPsec-specific opaque pointer */ xd = malloc(sizeof(*xd), M_XDATA, M_NOWAIT | M_ZERO); if (xd == NULL) { DPRINTF(("%s: failed to allocate xform_data\n", __func__)); goto xd_fail; } if (esph != NULL) { crp->crp_op = CRYPTO_OP_VERIFY_DIGEST; if (SAV_ISGCM(sav)) crp->crp_aad_length = 8; /* RFC4106 5, SPI + SN */ else crp->crp_aad_length = hlen; csp = crypto_get_params(crp->crp_session); if ((csp->csp_flags & CSP_F_SEPARATE_AAD) && (sav->replay != NULL) && (sav->replay->wsize != 0)) { int aad_skip; crp->crp_aad_length += sizeof(seqh); crp->crp_aad = malloc(crp->crp_aad_length, M_XDATA, M_NOWAIT); if (crp->crp_aad == NULL) { DPRINTF(("%s: failed to allocate xform_data\n", __func__)); goto crp_aad_fail; } /* SPI */ m_copydata(m, skip, SPI_SIZE, crp->crp_aad); aad_skip = SPI_SIZE; /* ESN */ bcopy(&seqh, (char *)crp->crp_aad + aad_skip, sizeof(seqh)); aad_skip += sizeof(seqh); /* Rest of aad */ if (crp->crp_aad_length - aad_skip > 0) m_copydata(m, skip + SPI_SIZE, crp->crp_aad_length - aad_skip, (char *)crp->crp_aad + aad_skip); } else crp->crp_aad_start = skip; if (csp->csp_flags & CSP_F_ESN && sav->replay != NULL && sav->replay->wsize != 0) memcpy(crp->crp_esn, &seqh, sizeof(seqh)); crp->crp_digest_start = m->m_pkthdr.len - alen; } /* Crypto operation descriptor */ crp->crp_flags = CRYPTO_F_CBIFSYNC; - if (V_async_crypto) - crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; crypto_use_mbuf(crp, m); crp->crp_callback = esp_input_cb; crp->crp_opaque = xd; /* These are passed as-is to the callback */ xd->sav = sav; xd->protoff = protoff; 
xd->skip = skip; xd->cryptoid = cryptoid; xd->vnet = curvnet; /* Decryption descriptor */ crp->crp_op |= CRYPTO_OP_DECRYPT; crp->crp_payload_start = skip + hlen; crp->crp_payload_length = m->m_pkthdr.len - (skip + hlen + alen); /* Generate or read cipher IV. */ if (SAV_ISCTRORGCM(sav)) { ivp = &crp->crp_iv[0]; /* * AES-GCM and AES-CTR use similar cipher IV formats * defined in RFC 4106 section 4 and RFC 3686 section * 4, respectively. * * The first 4 bytes of the cipher IV contain an * implicit salt, or nonce, obtained from the last 4 * bytes of the encryption key. The next 8 bytes hold * an explicit IV unique to each packet. This * explicit IV is used as the ESP IV for the packet. * The last 4 bytes hold a big-endian block counter * incremented for each block. For AES-GCM, the block * counter's initial value is defined as part of the * algorithm. For AES-CTR, the block counter's * initial value for each packet is defined as 1 by * RFC 3686. * * ------------------------------------------ * | Salt | Explicit ESP IV | Block Counter | * ------------------------------------------ * 4 bytes 8 bytes 4 bytes */ memcpy(ivp, sav->key_enc->key_data + _KEYLEN(sav->key_enc) - 4, 4); m_copydata(m, skip + hlen - sav->ivlen, sav->ivlen, &ivp[4]); if (SAV_ISCTR(sav)) { be32enc(&ivp[sav->ivlen + 4], 1); } crp->crp_flags |= CRYPTO_F_IV_SEPARATE; } else if (sav->ivlen != 0) crp->crp_iv_start = skip + hlen - sav->ivlen; - return (crypto_dispatch(crp)); + if (V_async_crypto) + return (crypto_dispatch_async(crp, CRYPTO_ASYNC_ORDERED)); + else + return (crypto_dispatch(crp)); crp_aad_fail: free(xd, M_XDATA); xd_fail: crypto_freereq(crp); ESPSTAT_INC(esps_crypto); error = ENOBUFS; bad: m_freem(m); key_freesav(&sav); return (error); } /* * ESP input callback from the crypto driver. */ static int esp_input_cb(struct cryptop *crp) { IPSEC_DEBUG_DECLARE(char buf[128]); uint8_t lastthree[3]; const struct auth_hash *esph; struct mbuf *m; struct xform_data *xd; struct secasvar *sav; struct secasindex *saidx; crypto_session_t cryptoid; int hlen, skip, protoff, error, alen; m = crp->crp_buf.cb_mbuf; xd = crp->crp_opaque; CURVNET_SET(xd->vnet); sav = xd->sav; skip = xd->skip; protoff = xd->protoff; cryptoid = xd->cryptoid; saidx = &sav->sah->saidx; esph = sav->tdb_authalgxform; /* Check for crypto errors */ if (crp->crp_etype) { if (crp->crp_etype == EAGAIN) { /* Reset the session ID */ if (ipsec_updateid(sav, &crp->crp_session, &cryptoid) != 0) crypto_freesession(cryptoid); xd->cryptoid = crp->crp_session; CURVNET_RESTORE(); return (crypto_dispatch(crp)); } /* EBADMSG indicates authentication failure. */ if (!(crp->crp_etype == EBADMSG && esph != NULL)) { ESPSTAT_INC(esps_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; goto bad; } } /* Shouldn't happen... */ if (m == NULL) { ESPSTAT_INC(esps_crypto); DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } ESPSTAT_INC(esps_hist[sav->alg_enc]); /* If authentication was performed, check now. 
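The 16-byte cipher-IV layout diagrammed above is easy to mis-assemble; a sketch for the AES-CTR case (hypothetical helper; sav->ivlen is 8 here):

#include <stdint.h>
#include <string.h>

/* Assemble the AES-CTR cipher IV: salt || explicit ESP IV || block counter. */
static void
build_ctr_iv(uint8_t iv[16], const uint8_t salt[4], const uint8_t esp_iv[8])
{
	memcpy(&iv[0], salt, 4);	/* implicit salt, last 4 key bytes */
	memcpy(&iv[4], esp_iv, 8);	/* explicit per-packet ESP IV */
	iv[12] = 0; iv[13] = 0;		/* RFC 3686: block counter ... */
	iv[14] = 0; iv[15] = 1;		/* ... starts at 1, big-endian */
}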
*/ if (esph != NULL) { alen = xform_ah_authsize(esph); AHSTAT_INC(ahs_hist[sav->alg_auth]); if (crp->crp_etype == EBADMSG) { DPRINTF(("%s: authentication hash mismatch for " "packet in SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); ESPSTAT_INC(esps_badauth); error = EACCES; goto bad; } m->m_flags |= M_AUTHIPDGM; /* Remove trailing authenticator */ m_adj(m, -alen); } /* Release the crypto descriptors */ free(xd, M_XDATA), xd = NULL; free(crp->crp_aad, M_XDATA), crp->crp_aad = NULL; crypto_freereq(crp), crp = NULL; /* * Packet is now decrypted. */ m->m_flags |= M_DECRYPTED; /* * Update replay sequence number, if appropriate. */ if (sav->replay) { u_int32_t seq; m_copydata(m, skip + offsetof(struct newesp, esp_seq), sizeof (seq), (caddr_t) &seq); SECASVAR_LOCK(sav); if (ipsec_updatereplay(ntohl(seq), sav)) { SECASVAR_UNLOCK(sav); DPRINTF(("%s: packet replay check for %s\n", __func__, ipsec_sa2str(sav, buf, sizeof(buf)))); ESPSTAT_INC(esps_replay); error = EACCES; goto bad; } SECASVAR_UNLOCK(sav); } /* Determine the ESP header length */ if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; /* Remove the ESP header and IV from the mbuf. */ error = m_striphdr(m, skip, hlen); if (error) { ESPSTAT_INC(esps_hdrops); DPRINTF(("%s: bad mbuf chain, SA %s/%08lx\n", __func__, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); goto bad; } /* Save the last three bytes of decrypted data */ m_copydata(m, m->m_pkthdr.len - 3, 3, lastthree); /* Verify pad length */ if (lastthree[1] + 2 > m->m_pkthdr.len - skip) { ESPSTAT_INC(esps_badilen); DPRINTF(("%s: invalid padding length %d for %u byte packet " "in SA %s/%08lx\n", __func__, lastthree[1], m->m_pkthdr.len - skip, ipsec_address(&sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); error = EINVAL; goto bad; } /* Verify correct decryption by checking the last padding bytes */ if ((sav->flags & SADB_X_EXT_PMASK) != SADB_X_EXT_PRAND) { if (lastthree[1] != lastthree[0] && lastthree[1] != 0) { ESPSTAT_INC(esps_badenc); DPRINTF(("%s: decryption failed for packet in " "SA %s/%08lx\n", __func__, ipsec_address( &sav->sah->saidx.dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); error = EINVAL; goto bad; } } /* * RFC4303 2.6: * Silently drop packet if next header field is IPPROTO_NONE. */ if (lastthree[2] == IPPROTO_NONE) goto bad; /* Trim the mbuf chain to remove trailing authenticator and padding */ m_adj(m, -(lastthree[1] + 2)); /* Restore the Next Protocol field */ m_copyback(m, protoff, sizeof (u_int8_t), lastthree + 2); switch (saidx->dst.sa.sa_family) { #ifdef INET6 case AF_INET6: error = ipsec6_common_input_cb(m, sav, skip, protoff); break; #endif #ifdef INET case AF_INET: error = ipsec4_common_input_cb(m, sav, skip, protoff); break; #endif default: panic("%s: Unexpected address family: %d saidx=%p", __func__, saidx->dst.sa.sa_family, saidx); } CURVNET_RESTORE(); return error; bad: CURVNET_RESTORE(); if (sav != NULL) key_freesav(&sav); if (m != NULL) m_freem(m); if (xd != NULL) free(xd, M_XDATA); if (crp != NULL) { free(crp->crp_aad, M_XDATA); crypto_freereq(crp); } return error; } /* * ESP output routine, called by ipsec[46]_perform_request(). 
*/ static int esp_output(struct mbuf *m, struct secpolicy *sp, struct secasvar *sav, u_int idx, int skip, int protoff) { IPSEC_DEBUG_DECLARE(char buf[IPSEC_ADDRSTRLEN]); struct cryptop *crp; const struct auth_hash *esph; const struct enc_xform *espx; struct mbuf *mo = NULL; struct xform_data *xd; struct secasindex *saidx; unsigned char *pad; uint8_t *ivp; uint64_t cntr; crypto_session_t cryptoid; int hlen, rlen, padding, blks, alen, i, roff; int error, maxpacketsize; uint8_t prot; uint32_t seqh; const struct crypto_session_params *csp; IPSEC_ASSERT(sav != NULL, ("null SA")); esph = sav->tdb_authalgxform; espx = sav->tdb_encalgxform; IPSEC_ASSERT(espx != NULL, ("null encoding xform")); if (sav->flags & SADB_X_EXT_OLD) hlen = sizeof (struct esp) + sav->ivlen; else hlen = sizeof (struct newesp) + sav->ivlen; rlen = m->m_pkthdr.len - skip; /* Raw payload length. */ /* * RFC4303 2.4 Requires 4 byte alignment. * Old versions of FreeBSD can't decrypt partial blocks encrypted * with AES-CTR. Align payload to native_blocksize (16 bytes) * in order to preserve compatibility. */ if (SAV_ISCTR(sav) && V_esp_ctr_compatibility) blks = MAX(4, espx->native_blocksize); /* Cipher blocksize */ else blks = MAX(4, espx->blocksize); /* XXX clamp padding length a la KAME??? */ padding = ((blks - ((rlen + 2) % blks)) % blks) + 2; alen = xform_ah_authsize(esph); ESPSTAT_INC(esps_output); saidx = &sav->sah->saidx; /* Check for maximum packet size violations. */ switch (saidx->dst.sa.sa_family) { #ifdef INET case AF_INET: maxpacketsize = IP_MAXPACKET; break; #endif /* INET */ #ifdef INET6 case AF_INET6: maxpacketsize = IPV6_MAXPACKET; break; #endif /* INET6 */ default: DPRINTF(("%s: unknown/unsupported protocol " "family %d, SA %s/%08lx\n", __func__, saidx->dst.sa.sa_family, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); ESPSTAT_INC(esps_nopf); error = EPFNOSUPPORT; goto bad; } /* DPRINTF(("%s: skip %d hlen %d rlen %d padding %d alen %d blksd %d\n", __func__, skip, hlen, rlen, padding, alen, blks)); */ if (skip + hlen + rlen + padding + alen > maxpacketsize) { DPRINTF(("%s: packet in SA %s/%08lx got too big " "(len %u, max len %u)\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi), skip + hlen + rlen + padding + alen, maxpacketsize)); ESPSTAT_INC(esps_toobig); error = EMSGSIZE; goto bad; } /* Update the counters. */ ESPSTAT_ADD(esps_obytes, m->m_pkthdr.len - skip); m = m_unshare(m, M_NOWAIT); if (m == NULL) { DPRINTF(("%s: cannot clone mbuf chain, SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); ESPSTAT_INC(esps_hdrops); error = ENOBUFS; goto bad; } /* Inject ESP header. */ mo = m_makespace(m, skip, hlen, &roff); if (mo == NULL) { DPRINTF(("%s: %u byte ESP hdr inject failed for SA %s/%08lx\n", __func__, hlen, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); ESPSTAT_INC(esps_hdrops); /* XXX diffs from openbsd */ error = ENOBUFS; goto bad; } /* Initialize ESP header. */ bcopy((caddr_t) &sav->spi, mtod(mo, caddr_t) + roff, sizeof(uint32_t)); SECASVAR_LOCK(sav); if (sav->replay) { uint32_t replay; #ifdef REGRESSION /* Emulate replay attack when ipsec_replay is TRUE. 
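The pad computation above already counts the two ESP trailer bytes (pad length and next header). A worked example with hypothetical sizes:

#include <assert.h>

int
main(void)
{
	int rlen = 100, blks = 16;	/* payload bytes, AES block size */
	int padding = ((blks - ((rlen + 2) % blks)) % blks) + 2;

	assert(padding == 12);			/* 10 pad bytes + 2 trailer bytes */
	assert((rlen + padding) % blks == 0);	/* RFC 4303 alignment holds */
	return (0);
}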
*/ if (!V_ipsec_replay) #endif sav->replay->count++; replay = htonl((uint32_t)sav->replay->count); bcopy((caddr_t) &replay, mtod(mo, caddr_t) + roff + sizeof(uint32_t), sizeof(uint32_t)); seqh = htonl((uint32_t)(sav->replay->count >> IPSEC_SEQH_SHIFT)); } cryptoid = sav->tdb_cryptoid; if (SAV_ISCTRORGCM(sav)) cntr = sav->cntr++; SECASVAR_UNLOCK(sav); /* * Add padding -- better to do it ourselves than use the crypto engine, * although if/when we support compression, we'd have to do that. */ pad = (u_char *) m_pad(m, padding + alen); if (pad == NULL) { DPRINTF(("%s: m_pad failed for SA %s/%08lx\n", __func__, ipsec_address(&saidx->dst, buf, sizeof(buf)), (u_long) ntohl(sav->spi))); m = NULL; /* NB: free'd by m_pad */ error = ENOBUFS; goto bad; } /* * Add padding: random, zero, or self-describing. * XXX catch unexpected setting */ switch (sav->flags & SADB_X_EXT_PMASK) { case SADB_X_EXT_PRAND: arc4random_buf(pad, padding - 2); break; case SADB_X_EXT_PZERO: bzero(pad, padding - 2); break; case SADB_X_EXT_PSEQ: for (i = 0; i < padding - 2; i++) pad[i] = i+1; break; } /* Fix padding length and Next Protocol in padding itself. */ pad[padding - 2] = padding - 2; m_copydata(m, protoff, sizeof(u_int8_t), pad + padding - 1); /* Fix Next Protocol in IPv4/IPv6 header. */ prot = IPPROTO_ESP; m_copyback(m, protoff, sizeof(u_int8_t), (u_char *) &prot); /* Get crypto descriptor. */ crp = crypto_getreq(cryptoid, M_NOWAIT); if (crp == NULL) { DPRINTF(("%s: failed to acquire crypto descriptor\n", __func__)); ESPSTAT_INC(esps_crypto); error = ENOBUFS; goto bad; } /* IPsec-specific opaque crypto info. */ xd = malloc(sizeof(struct xform_data), M_XDATA, M_NOWAIT | M_ZERO); if (xd == NULL) { DPRINTF(("%s: failed to allocate xform_data\n", __func__)); goto xd_fail; } /* Encryption descriptor. */ crp->crp_payload_start = skip + hlen; crp->crp_payload_length = m->m_pkthdr.len - (skip + hlen + alen); crp->crp_op = CRYPTO_OP_ENCRYPT; /* Generate cipher and ESP IVs. */ ivp = &crp->crp_iv[0]; if (SAV_ISCTRORGCM(sav)) { /* * See comment in esp_input() for details on the * cipher IV. A simple per-SA counter stored in * 'cntr' is used as the explicit ESP IV. */ memcpy(ivp, sav->key_enc->key_data + _KEYLEN(sav->key_enc) - 4, 4); be64enc(&ivp[4], cntr); if (SAV_ISCTR(sav)) { be32enc(&ivp[sav->ivlen + 4], 1); } m_copyback(m, skip + hlen - sav->ivlen, sav->ivlen, &ivp[4]); crp->crp_flags |= CRYPTO_F_IV_SEPARATE; } else if (sav->ivlen != 0) { arc4rand(ivp, sav->ivlen, 0); crp->crp_iv_start = skip + hlen - sav->ivlen; m_copyback(m, crp->crp_iv_start, sav->ivlen, ivp); } /* Callback parameters */ xd->sp = sp; xd->sav = sav; xd->idx = idx; xd->cryptoid = cryptoid; xd->vnet = curvnet; /* Crypto operation descriptor. */ crp->crp_flags |= CRYPTO_F_CBIFSYNC; - if (V_async_crypto) - crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER; crypto_use_mbuf(crp, m); crp->crp_callback = esp_output_cb; crp->crp_opaque = xd; if (esph) { /* Authentication descriptor. 
*/ crp->crp_op |= CRYPTO_OP_COMPUTE_DIGEST; if (SAV_ISGCM(sav)) crp->crp_aad_length = 8; /* RFC4106 5, SPI + SN */ else crp->crp_aad_length = hlen; csp = crypto_get_params(crp->crp_session); if (csp->csp_flags & CSP_F_SEPARATE_AAD && sav->replay != NULL) { int aad_skip; crp->crp_aad_length += sizeof(seqh); crp->crp_aad = malloc(crp->crp_aad_length, M_XDATA, M_NOWAIT); if (crp->crp_aad == NULL) { DPRINTF(("%s: failed to allocate xform_data\n", __func__)); goto crp_aad_fail; } /* SPI */ m_copydata(m, skip, SPI_SIZE, crp->crp_aad); aad_skip = SPI_SIZE; /* ESN */ bcopy(&seqh, (char *)crp->crp_aad + aad_skip, sizeof(seqh)); aad_skip += sizeof(seqh); /* Rest of aad */ if (crp->crp_aad_length - aad_skip > 0) m_copydata(m, skip + SPI_SIZE, crp->crp_aad_length - aad_skip, (char *)crp->crp_aad + aad_skip); } else crp->crp_aad_start = skip; if (csp->csp_flags & CSP_F_ESN && sav->replay != NULL) memcpy(crp->crp_esn, &seqh, sizeof(seqh)); crp->crp_digest_start = m->m_pkthdr.len - alen; } - return crypto_dispatch(crp); + if (V_async_crypto) + return (crypto_dispatch_async(crp, CRYPTO_ASYNC_ORDERED)); + else + return (crypto_dispatch(crp)); crp_aad_fail: free(xd, M_XDATA); xd_fail: crypto_freereq(crp); ESPSTAT_INC(esps_crypto); error = ENOBUFS; bad: if (m) m_freem(m); key_freesav(&sav); key_freesp(&sp); return (error); } /* * ESP output callback from the crypto driver. */ static int esp_output_cb(struct cryptop *crp) { struct xform_data *xd; struct secpolicy *sp; struct secasvar *sav; struct mbuf *m; crypto_session_t cryptoid; u_int idx; int error; xd = (struct xform_data *) crp->crp_opaque; CURVNET_SET(xd->vnet); m = crp->crp_buf.cb_mbuf; sp = xd->sp; sav = xd->sav; idx = xd->idx; cryptoid = xd->cryptoid; /* Check for crypto errors. */ if (crp->crp_etype) { if (crp->crp_etype == EAGAIN) { /* Reset the session ID */ if (ipsec_updateid(sav, &crp->crp_session, &cryptoid) != 0) crypto_freesession(cryptoid); xd->cryptoid = crp->crp_session; CURVNET_RESTORE(); return (crypto_dispatch(crp)); } ESPSTAT_INC(esps_noxform); DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype)); error = crp->crp_etype; m_freem(m); goto bad; } /* Shouldn't happen... */ if (m == NULL) { ESPSTAT_INC(esps_crypto); DPRINTF(("%s: bogus returned buffer from crypto\n", __func__)); error = EINVAL; goto bad; } free(xd, M_XDATA); free(crp->crp_aad, M_XDATA); crypto_freereq(crp); ESPSTAT_INC(esps_hist[sav->alg_enc]); if (sav->tdb_authalgxform != NULL) AHSTAT_INC(ahs_hist[sav->alg_auth]); #ifdef REGRESSION /* Emulate man-in-the-middle attack when ipsec_integrity is TRUE. */ if (V_ipsec_integrity) { static unsigned char ipseczeroes[AH_HMAC_MAXHASHLEN]; const struct auth_hash *esph; /* * Corrupt HMAC if we want to test integrity verification of * the other side. */ esph = sav->tdb_authalgxform; if (esph != NULL) { int alen; alen = xform_ah_authsize(esph); m_copyback(m, m->m_pkthdr.len - alen, alen, ipseczeroes); } } #endif /* NB: m is reclaimed by ipsec_process_done. 
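When CSP_F_SEPARATE_AAD is set, the AAD assembled in esp_output() above for AES-GCM with ESN has a fixed 12-byte layout: the high-order half of the 64-bit sequence number is spliced between the SPI and the on-the-wire 32-bit sequence number, per RFC 4106 section 5. A sketch (hypothetical helper; all inputs already big-endian):

#include <stdint.h>
#include <string.h>

/* AAD for AES-GCM + ESN: SPI || high 32 bits of ESN || low 32 bits of SN. */
static void
build_esn_aad(uint8_t aad[12], uint32_t spi_be, uint32_t esn_hi_be,
    uint32_t sn_lo_be)
{
	memcpy(&aad[0], &spi_be, sizeof(spi_be));
	memcpy(&aad[4], &esn_hi_be, sizeof(esn_hi_be));
	memcpy(&aad[8], &sn_lo_be, sizeof(sn_lo_be));
}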
*/ error = ipsec_process_done(m, sp, sav, idx); CURVNET_RESTORE(); return (error); bad: CURVNET_RESTORE(); free(xd, M_XDATA); free(crp->crp_aad, M_XDATA); crypto_freereq(crp); key_freesav(&sav); key_freesp(&sp); return (error); } static struct xformsw esp_xformsw = { .xf_type = XF_ESP, .xf_name = "IPsec ESP", .xf_init = esp_init, .xf_cleanup = esp_cleanup, .xf_input = esp_input, .xf_output = esp_output, }; SYSINIT(esp_xform_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, xform_attach, &esp_xformsw); SYSUNINIT(esp_xform_uninit, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, xform_detach, &esp_xformsw); diff --git a/sys/opencrypto/crypto.c b/sys/opencrypto/crypto.c index 0316eb35361a..3b489739f067 100644 --- a/sys/opencrypto/crypto.c +++ b/sys/opencrypto/crypto.c @@ -1,2284 +1,2308 @@ /*- * Copyright (c) 2002-2006 Sam Leffler. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Cryptographic Subsystem. * * This code is derived from the Openbsd Cryptographic Framework (OCF) * that has the copyright shown below. Very little of the original * code remains. */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. 
*/ #include "opt_compat.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) #include #endif SDT_PROVIDER_DEFINE(opencrypto); /* * Crypto drivers register themselves by allocating a slot in the * crypto_drivers table with crypto_get_driverid() and then registering * each asym algorithm they support with crypto_kregister(). */ static struct mtx crypto_drivers_mtx; /* lock on driver table */ #define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_ASSERT() mtx_assert(&crypto_drivers_mtx, MA_OWNED) /* * Crypto device/driver capabilities structure. * * Synchronization: * (d) - protected by CRYPTO_DRIVER_LOCK() * (q) - protected by CRYPTO_Q_LOCK() * Not tagged fields are read-only. */ struct cryptocap { device_t cc_dev; uint32_t cc_hid; uint32_t cc_sessions; /* (d) # of sessions */ uint32_t cc_koperations; /* (d) # os asym operations */ uint8_t cc_kalg[CRK_ALGORITHM_MAX + 1]; int cc_flags; /* (d) flags */ #define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */ int cc_qblocked; /* (q) symmetric q blocked */ int cc_kqblocked; /* (q) asymmetric q blocked */ size_t cc_session_size; volatile int cc_refs; }; static struct cryptocap **crypto_drivers = NULL; static int crypto_drivers_size = 0; struct crypto_session { struct cryptocap *cap; struct crypto_session_params csp; uint64_t id; /* Driver softc follows. */ }; /* * There are two queues for crypto requests; one for symmetric (e.g. * cipher) operations and one for asymmetric (e.g. MOD)operations. * A single mutex is used to lock access to both queues. We could * have one per-queue but having one simplifies handling of block/unblock * operations. 
*/ static int crp_sleep = 0; static TAILQ_HEAD(cryptop_q ,cryptop) crp_q; /* request queues */ static TAILQ_HEAD(,cryptkop) crp_kq; static struct mtx crypto_q_mtx; #define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx) #define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx) SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0, "In-kernel cryptography"); /* * Taskqueue used to dispatch the crypto requests * submitted with crypto_dispatch_async() */ static struct taskqueue *crypto_tq; /* * Crypto seq numbers are operated on with modular arithmetic */ #define CRYPTO_SEQ_GT(a,b) ((int)((a)-(b)) > 0) struct crypto_ret_worker { struct mtx crypto_ret_mtx; TAILQ_HEAD(,cryptop) crp_ordered_ret_q; /* ordered callback queue for symmetric jobs */ TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queue for symmetric jobs */ TAILQ_HEAD(,cryptkop) crp_ret_kq; /* callback queue for asym jobs */ uint32_t reorder_ops; /* total ordered sym jobs received */ uint32_t reorder_cur_seq; /* current sym job dispatched */ struct proc *cryptoretproc; }; static struct crypto_ret_worker *crypto_ret_workers = NULL; #define CRYPTO_RETW(i) (&crypto_ret_workers[i]) #define CRYPTO_RETW_ID(w) ((w) - crypto_ret_workers) #define FOREACH_CRYPTO_RETW(w) \ for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w) #define CRYPTO_RETW_LOCK(w) mtx_lock(&w->crypto_ret_mtx) #define CRYPTO_RETW_UNLOCK(w) mtx_unlock(&w->crypto_ret_mtx) -#define CRYPTO_RETW_EMPTY(w) \ - (TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q)) static int crypto_workers_num = 0; SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN, &crypto_workers_num, 0, "Number of crypto workers used to dispatch crypto jobs"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN, &crypto_workers_num, 0, "Number of crypto workers used to dispatch crypto jobs"); #endif static uma_zone_t cryptop_zone; int crypto_userasymcrypto = 1; SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW, &crypto_userasymcrypto, 0, "Enable user-mode access to asymmetric crypto support"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW, &crypto_userasymcrypto, 0, "Enable/disable user-mode access to asymmetric crypto support"); #endif int crypto_devallowsoft = 0; SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW, &crypto_devallowsoft, 0, "Enable use of software crypto by /dev/crypto"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW, &crypto_devallowsoft, 0, "Enable/disable use of software crypto by /dev/crypto"); #endif MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records"); static void crypto_proc(void); static struct proc *cryptoproc; static void crypto_ret_proc(struct crypto_ret_worker *ret_worker); static void crypto_destroy(void); static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint); static int crypto_kinvoke(struct cryptkop *krp); static void crypto_task_invoke(void *ctx, int pending); static void crypto_batch_enqueue(struct cryptop *crp); static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)]; SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW, cryptostats, nitems(cryptostats), "Crypto system statistics"); #define CRYPTOSTAT_INC(stat) do { \ counter_u64_add( \ cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\ 1); \ } while (0) static void cryptostats_init(void *arg __unused) { COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats),
M_WAITOK); } SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL); static void cryptostats_fini(void *arg __unused) { COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats)); } SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini, NULL); /* Try to avoid directly exposing the key buffer as a symbol */ static struct keybuf *keybuf; static struct keybuf empty_keybuf = { .kb_nents = 0 }; /* Obtain the key buffer from boot metadata */ static void keybuf_init(void) { caddr_t kmdp; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); keybuf = (struct keybuf *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_KEYBUF); if (keybuf == NULL) keybuf = &empty_keybuf; } /* It'd be nice if we could store these in some kind of secure memory... */ struct keybuf * get_keybuf(void) { return (keybuf); } static struct cryptocap * cap_ref(struct cryptocap *cap) { refcount_acquire(&cap->cc_refs); return (cap); } static void cap_rele(struct cryptocap *cap) { if (refcount_release(&cap->cc_refs) == 0) return; KASSERT(cap->cc_sessions == 0, ("freeing crypto driver with active sessions")); KASSERT(cap->cc_koperations == 0, ("freeing crypto driver with active key operations")); free(cap, M_CRYPTO_DATA); } static int crypto_init(void) { struct crypto_ret_worker *ret_worker; int error; mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table", MTX_DEF|MTX_QUIET); TAILQ_INIT(&crp_q); TAILQ_INIT(&crp_kq); mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF); cryptop_zone = uma_zcreate("cryptop", sizeof(struct cryptop), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); crypto_drivers_size = CRYPTO_DRIVERS_INITIAL; crypto_drivers = malloc(crypto_drivers_size * sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus) crypto_workers_num = mp_ncpus; crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &crypto_tq); taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN, "crypto"); error = kproc_create((void (*)(void *)) crypto_proc, NULL, &cryptoproc, 0, 0, "crypto"); if (error) { printf("crypto_init: cannot start crypto thread; error %d", error); goto bad; } crypto_ret_workers = mallocarray(crypto_workers_num, sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO); FOREACH_CRYPTO_RETW(ret_worker) { TAILQ_INIT(&ret_worker->crp_ordered_ret_q); TAILQ_INIT(&ret_worker->crp_ret_q); TAILQ_INIT(&ret_worker->crp_ret_kq); ret_worker->reorder_ops = 0; ret_worker->reorder_cur_seq = 0; mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF); error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker, &ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker)); if (error) { printf("crypto_init: cannot start cryptoret thread; error %d", error); goto bad; } } keybuf_init(); return 0; bad: crypto_destroy(); return error; } /* * Signal a crypto thread to terminate. We use the driver * table lock to synchronize the sleep/wakeups so that we * are sure the threads have terminated before we release * the data structures they use. See crypto_finis below * for the other half of this song-and-dance. 
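* Concretely: crypto_terminate() clears the proc pointer and wakes the * thread via its queue channel, then msleep()s on the thread's proc; * the thread notices the cleared pointer, calls crypto_finis(), and * exits through kproc_exit(), whose wakeup in exit1() releases the * waiter.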
*/ static void crypto_terminate(struct proc **pp, void *q) { struct proc *p; mtx_assert(&crypto_drivers_mtx, MA_OWNED); p = *pp; *pp = NULL; if (p) { wakeup_one(q); PROC_LOCK(p); /* NB: insure we don't miss wakeup */ CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */ msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0); PROC_UNLOCK(p); CRYPTO_DRIVER_LOCK(); } } static void hmac_init_pad(const struct auth_hash *axf, const char *key, int klen, void *auth_ctx, uint8_t padval) { uint8_t hmac_key[HMAC_MAX_BLOCK_LEN]; u_int i; KASSERT(axf->blocksize <= sizeof(hmac_key), ("Invalid HMAC block size %d", axf->blocksize)); /* * If the key is larger than the block size, use the digest of * the key as the key instead. */ memset(hmac_key, 0, sizeof(hmac_key)); if (klen > axf->blocksize) { axf->Init(auth_ctx); axf->Update(auth_ctx, key, klen); axf->Final(hmac_key, auth_ctx); klen = axf->hashsize; } else memcpy(hmac_key, key, klen); for (i = 0; i < axf->blocksize; i++) hmac_key[i] ^= padval; axf->Init(auth_ctx); axf->Update(auth_ctx, hmac_key, axf->blocksize); explicit_bzero(hmac_key, sizeof(hmac_key)); } void hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen, void *auth_ctx) { hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL); } void hmac_init_opad(const struct auth_hash *axf, const char *key, int klen, void *auth_ctx) { hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL); } static void crypto_destroy(void) { struct crypto_ret_worker *ret_worker; int i; /* * Terminate any crypto threads. */ if (crypto_tq != NULL) taskqueue_drain_all(crypto_tq); CRYPTO_DRIVER_LOCK(); crypto_terminate(&cryptoproc, &crp_q); FOREACH_CRYPTO_RETW(ret_worker) crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q); CRYPTO_DRIVER_UNLOCK(); /* XXX flush queues??? */ /* * Reclaim dynamically allocated resources. 
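* All worker threads have terminated by this point, so the driver * table, queues, and mutexes below can be torn down without further * synchronization.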
*/ for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] != NULL) cap_rele(crypto_drivers[i]); } free(crypto_drivers, M_CRYPTO_DATA); if (cryptop_zone != NULL) uma_zdestroy(cryptop_zone); mtx_destroy(&crypto_q_mtx); FOREACH_CRYPTO_RETW(ret_worker) mtx_destroy(&ret_worker->crypto_ret_mtx); free(crypto_ret_workers, M_CRYPTO_DATA); if (crypto_tq != NULL) taskqueue_free(crypto_tq); mtx_destroy(&crypto_drivers_mtx); } uint32_t crypto_ses2hid(crypto_session_t crypto_session) { return (crypto_session->cap->cc_hid); } uint32_t crypto_ses2caps(crypto_session_t crypto_session) { return (crypto_session->cap->cc_flags & 0xff000000); } void * crypto_get_driver_session(crypto_session_t crypto_session) { return (crypto_session + 1); } const struct crypto_session_params * crypto_get_params(crypto_session_t crypto_session) { return (&crypto_session->csp); } struct auth_hash * crypto_auth_hash(const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: return (&auth_hash_hmac_sha1); case CRYPTO_SHA2_224_HMAC: return (&auth_hash_hmac_sha2_224); case CRYPTO_SHA2_256_HMAC: return (&auth_hash_hmac_sha2_256); case CRYPTO_SHA2_384_HMAC: return (&auth_hash_hmac_sha2_384); case CRYPTO_SHA2_512_HMAC: return (&auth_hash_hmac_sha2_512); case CRYPTO_NULL_HMAC: return (&auth_hash_null); case CRYPTO_RIPEMD160_HMAC: return (&auth_hash_hmac_ripemd_160); case CRYPTO_SHA1: return (&auth_hash_sha1); case CRYPTO_SHA2_224: return (&auth_hash_sha2_224); case CRYPTO_SHA2_256: return (&auth_hash_sha2_256); case CRYPTO_SHA2_384: return (&auth_hash_sha2_384); case CRYPTO_SHA2_512: return (&auth_hash_sha2_512); case CRYPTO_AES_NIST_GMAC: switch (csp->csp_auth_klen) { case 128 / 8: return (&auth_hash_nist_gmac_aes_128); case 192 / 8: return (&auth_hash_nist_gmac_aes_192); case 256 / 8: return (&auth_hash_nist_gmac_aes_256); default: return (NULL); } case CRYPTO_BLAKE2B: return (&auth_hash_blake2b); case CRYPTO_BLAKE2S: return (&auth_hash_blake2s); case CRYPTO_POLY1305: return (&auth_hash_poly1305); case CRYPTO_AES_CCM_CBC_MAC: switch (csp->csp_auth_klen) { case 128 / 8: return (&auth_hash_ccm_cbc_mac_128); case 192 / 8: return (&auth_hash_ccm_cbc_mac_192); case 256 / 8: return (&auth_hash_ccm_cbc_mac_256); default: return (NULL); } default: return (NULL); } } struct enc_xform * crypto_cipher(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_RIJNDAEL128_CBC: return (&enc_xform_rijndael128); case CRYPTO_AES_XTS: return (&enc_xform_aes_xts); case CRYPTO_AES_ICM: return (&enc_xform_aes_icm); case CRYPTO_AES_NIST_GCM_16: return (&enc_xform_aes_nist_gcm); case CRYPTO_CAMELLIA_CBC: return (&enc_xform_camellia); case CRYPTO_NULL_CBC: return (&enc_xform_null); case CRYPTO_CHACHA20: return (&enc_xform_chacha20); case CRYPTO_AES_CCM_16: return (&enc_xform_ccm); default: return (NULL); } } static struct cryptocap * crypto_checkdriver(uint32_t hid) { return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]); } /* * Select a driver for a new session that supports the specified * algorithms and, optionally, is constrained according to the flags. */ static struct cryptocap * crypto_select_driver(const struct crypto_session_params *csp, int flags) { struct cryptocap *cap, *best; int best_match, error, hid; CRYPTO_DRIVER_ASSERT(); best = NULL; for (hid = 0; hid < crypto_drivers_size; hid++) { /* * If there is no driver for this slot, or the driver * is not appropriate (hardware or software based on * match), then skip. 
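* (Probe values returned by CRYPTODEV_PROBESESSION are negative; the * "highest probe value" mentioned below means closest to zero, e.g. * CRYPTODEV_PROBE_HARDWARE (-100) beats CRYPTODEV_PROBE_SOFTWARE (-500).)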
*/ cap = crypto_drivers[hid]; if (cap == NULL || (cap->cc_flags & flags) == 0) continue; error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp); if (error >= 0) continue; /* * Use the driver with the highest probe value. * Hardware drivers use a higher probe value than * software. In case of a tie, prefer the driver with * the fewest active sessions. */ if (best == NULL || error > best_match || (error == best_match && cap->cc_sessions < best->cc_sessions)) { best = cap; best_match = error; } } return best; } static enum alg_type { ALG_NONE = 0, ALG_CIPHER, ALG_DIGEST, ALG_KEYED_DIGEST, ALG_COMPRESSION, ALG_AEAD } alg_types[] = { [CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_CBC] = ALG_CIPHER, [CRYPTO_SHA1] = ALG_DIGEST, [CRYPTO_NULL_HMAC] = ALG_DIGEST, [CRYPTO_NULL_CBC] = ALG_CIPHER, [CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION, [CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_CAMELLIA_CBC] = ALG_CIPHER, [CRYPTO_AES_XTS] = ALG_CIPHER, [CRYPTO_AES_ICM] = ALG_CIPHER, [CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_NIST_GCM_16] = ALG_AEAD, [CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST, [CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST, [CRYPTO_CHACHA20] = ALG_CIPHER, [CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST, [CRYPTO_RIPEMD160] = ALG_DIGEST, [CRYPTO_SHA2_224] = ALG_DIGEST, [CRYPTO_SHA2_256] = ALG_DIGEST, [CRYPTO_SHA2_384] = ALG_DIGEST, [CRYPTO_SHA2_512] = ALG_DIGEST, [CRYPTO_POLY1305] = ALG_KEYED_DIGEST, [CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST, [CRYPTO_AES_CCM_16] = ALG_AEAD, }; static enum alg_type alg_type(int alg) { if (alg < nitems(alg_types)) return (alg_types[alg]); return (ALG_NONE); } static bool alg_is_compression(int alg) { return (alg_type(alg) == ALG_COMPRESSION); } static bool alg_is_cipher(int alg) { return (alg_type(alg) == ALG_CIPHER); } static bool alg_is_digest(int alg) { return (alg_type(alg) == ALG_DIGEST || alg_type(alg) == ALG_KEYED_DIGEST); } static bool alg_is_keyed_digest(int alg) { return (alg_type(alg) == ALG_KEYED_DIGEST); } static bool alg_is_aead(int alg) { return (alg_type(alg) == ALG_AEAD); } #define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN) /* Various sanity checks on crypto session parameters. */ static bool check_csp(const struct crypto_session_params *csp) { struct auth_hash *axf; /* Mode-independent checks. 
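* As an illustrative sketch (field values assumed here, not taken from * this change), a session setup that passes these checks and the * CSP_MODE_AEAD checks below would be: * * struct crypto_session_params csp = { * .csp_mode = CSP_MODE_AEAD, * .csp_cipher_alg = CRYPTO_AES_NIST_GCM_16, * .csp_cipher_klen = 32, * .csp_ivlen = AES_GCM_IV_LEN, * }; * * with all authentication fields left zero, since AEAD sessions carry * their digest parameters implicitly.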
*/ if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0) return (false); if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 || csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0) return (false); if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0) return (false); if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0) return (false); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: if (!alg_is_compression(csp->csp_cipher_alg)) return (false); if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 || csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || csp->csp_auth_mlen != 0) return (false); break; case CSP_MODE_CIPHER: if (!alg_is_cipher(csp->csp_cipher_alg)) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0) return (false); } if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || csp->csp_auth_mlen != 0) return (false); break; case CSP_MODE_DIGEST: if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0) return (false); if (csp->csp_flags & CSP_F_SEPARATE_AAD) return (false); /* IV is optional for digests (e.g. GMAC). */ if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (!alg_is_digest(csp->csp_auth_alg)) return (false); /* Key is optional for BLAKE2 digests. */ if (csp->csp_auth_alg == CRYPTO_BLAKE2B || csp->csp_auth_alg == CRYPTO_BLAKE2S) ; else if (alg_is_keyed_digest(csp->csp_auth_alg)) { if (csp->csp_auth_klen == 0) return (false); } else { if (csp->csp_auth_klen != 0) return (false); } if (csp->csp_auth_mlen != 0) { axf = crypto_auth_hash(csp); if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) return (false); } break; case CSP_MODE_AEAD: if (!alg_is_aead(csp->csp_cipher_alg)) return (false); if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0 || csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0) return (false); /* * XXX: Would be nice to have a better way to get this * value. */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: if (csp->csp_auth_mlen > 16) return (false); break; } break; case CSP_MODE_ETA: if (!alg_is_cipher(csp->csp_cipher_alg)) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0) return (false); } if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (!alg_is_digest(csp->csp_auth_alg)) return (false); /* Key is optional for BLAKE2 digests. */ if (csp->csp_auth_alg == CRYPTO_BLAKE2B || csp->csp_auth_alg == CRYPTO_BLAKE2S) ; else if (alg_is_keyed_digest(csp->csp_auth_alg)) { if (csp->csp_auth_klen == 0) return (false); } else { if (csp->csp_auth_klen != 0) return (false); } if (csp->csp_auth_mlen != 0) { axf = crypto_auth_hash(csp); if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) return (false); } break; default: return (false); } return (true); } /* * Delete a session after it has been detached from its driver. */ static void crypto_deletesession(crypto_session_t cses) { struct cryptocap *cap; cap = cses->cap; zfree(cses, M_CRYPTO_DATA); CRYPTO_DRIVER_LOCK(); cap->cc_sessions--; if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) wakeup(cap); CRYPTO_DRIVER_UNLOCK(); cap_rele(cap); } /* * Create a new session. 
The crid argument specifies a crypto * driver to use or constraints on a driver to select (hardware * only, software only, either). Whatever driver is selected * must be capable of the requested crypto algorithms. */ int crypto_newsession(crypto_session_t *cses, const struct crypto_session_params *csp, int crid) { static uint64_t sessid = 0; crypto_session_t res; struct cryptocap *cap; int err; if (!check_csp(csp)) return (EINVAL); res = NULL; CRYPTO_DRIVER_LOCK(); if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { /* * Use specified driver; verify it is capable. */ cap = crypto_checkdriver(crid); if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0) cap = NULL; } else { /* * No requested driver; select based on crid flags. */ cap = crypto_select_driver(csp, crid); } if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); CRYPTDEB("no driver"); return (EOPNOTSUPP); } cap_ref(cap); cap->cc_sessions++; CRYPTO_DRIVER_UNLOCK(); /* Allocate a single block for the generic session and driver softc. */ res = malloc(sizeof(*res) + cap->cc_session_size, M_CRYPTO_DATA, M_WAITOK | M_ZERO); res->cap = cap; res->csp = *csp; res->id = atomic_fetchadd_64(&sessid, 1); /* Call the driver initialization routine. */ err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp); if (err != 0) { CRYPTDEB("dev newsession failed: %d", err); crypto_deletesession(res); return (err); } *cses = res; return (0); } /* * Delete an existing session (or a reserved session on an unregistered * driver). */ void crypto_freesession(crypto_session_t cses) { struct cryptocap *cap; if (cses == NULL) return; cap = cses->cap; /* Call the driver cleanup routine, if available. */ CRYPTODEV_FREESESSION(cap->cc_dev, cses); crypto_deletesession(cses); } /* * Return a new driver id. Registers a driver with the system so that * it can be probed by subsequent sessions. */ int32_t crypto_get_driverid(device_t dev, size_t sessionsize, int flags) { struct cryptocap *cap, **newdrv; int i; if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { device_printf(dev, "no flags specified when registering driver\n"); return -1; } cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); cap->cc_dev = dev; cap->cc_session_size = sessionsize; cap->cc_flags = flags; refcount_init(&cap->cc_refs, 1); CRYPTO_DRIVER_LOCK(); for (;;) { for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] == NULL) break; } if (i < crypto_drivers_size) break; /* Out of entries, allocate some more. */ if (2 * crypto_drivers_size <= crypto_drivers_size) { CRYPTO_DRIVER_UNLOCK(); printf("crypto: driver count wraparound!\n"); cap_rele(cap); return (-1); } CRYPTO_DRIVER_UNLOCK(); newdrv = malloc(2 * crypto_drivers_size * sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO); CRYPTO_DRIVER_LOCK(); memcpy(newdrv, crypto_drivers, crypto_drivers_size * sizeof(*crypto_drivers)); crypto_drivers_size *= 2; free(crypto_drivers, M_CRYPTO_DATA); crypto_drivers = newdrv; } cap->cc_hid = i; crypto_drivers[i] = cap; CRYPTO_DRIVER_UNLOCK(); if (bootverbose) printf("crypto: assign %s driver id %u, flags 0x%x\n", device_get_nameunit(dev), i, flags); return i; } /* * Look up a driver by name. We match against the full device * name and unit, and against just the name. The latter gives * us a simple wildcarding by device name. On success return the * driver/hardware identifier; otherwise return -1.
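* For example (driver names assumed for illustration): * * crypto_find_driver("aesni0") matches only device aesni0 * crypto_find_driver("aesni") matches the first aesni device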
*/ int crypto_find_driver(const char *match) { struct cryptocap *cap; int i, len = strlen(match); CRYPTO_DRIVER_LOCK(); for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] == NULL) continue; cap = crypto_drivers[i]; if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 || strncmp(match, device_get_name(cap->cc_dev), len) == 0) { CRYPTO_DRIVER_UNLOCK(); return (i); } } CRYPTO_DRIVER_UNLOCK(); return (-1); } /* * Return the device_t for the specified driver or NULL * if the driver identifier is invalid. */ device_t crypto_find_device_byhid(int hid) { struct cryptocap *cap; device_t dev; dev = NULL; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(hid); if (cap != NULL) dev = cap->cc_dev; CRYPTO_DRIVER_UNLOCK(); return (dev); } /* * Return the device/driver capabilities. */ int crypto_getcaps(int hid) { struct cryptocap *cap; int flags; flags = 0; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(hid); if (cap != NULL) flags = cap->cc_flags; CRYPTO_DRIVER_UNLOCK(); return (flags); } /* * Register support for a key-related algorithm. This routine * is called once for each algorithm supported by a driver. */ int crypto_kregister(uint32_t driverid, int kalg, uint32_t flags) { struct cryptocap *cap; int err; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL && (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) { /* * XXX Do some performance testing to determine placing. * XXX We probably need an auxiliary data structure that * XXX describes relative performances. */ cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; if (bootverbose) printf("crypto: %s registers key alg %u flags %u\n" , device_get_nameunit(cap->cc_dev) , kalg , flags ); gone_in_dev(cap->cc_dev, 14, "asymmetric crypto"); err = 0; } else err = EINVAL; CRYPTO_DRIVER_UNLOCK(); return err; } /* * Unregister all algorithms associated with a crypto driver. * If there are pending sessions using it, leave enough information * around so that subsequent calls using those sessions will * correctly detect the driver has been unregistered and reroute * requests. */ int crypto_unregister_all(uint32_t driverid) { struct cryptocap *cap; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); return (EINVAL); } cap->cc_flags |= CRYPTOCAP_F_CLEANUP; crypto_drivers[driverid] = NULL; /* * XXX: This doesn't do anything to kick sessions that * have no pending operations. */ while (cap->cc_sessions != 0 || cap->cc_koperations != 0) mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0); CRYPTO_DRIVER_UNLOCK(); cap_rele(cap); return (0); } /* * Clear blockage on a driver. The what parameter indicates whether * the driver is now ready for cryptop's and/or cryptokop's. */ int crypto_unblock(uint32_t driverid, int what) { struct cryptocap *cap; int err; CRYPTO_Q_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL) { if (what & CRYPTO_SYMQ) cap->cc_qblocked = 0; if (what & CRYPTO_ASYMQ) cap->cc_kqblocked = 0; if (crp_sleep) wakeup_one(&crp_q); err = 0; } else err = EINVAL; CRYPTO_Q_UNLOCK(); return err; } size_t crypto_buffer_len(struct crypto_buffer *cb) { switch (cb->cb_type) { case CRYPTO_BUF_CONTIG: return (cb->cb_buf_len); case CRYPTO_BUF_MBUF: if (cb->cb_mbuf->m_flags & M_PKTHDR) return (cb->cb_mbuf->m_pkthdr.len); return (m_length(cb->cb_mbuf, NULL)); case CRYPTO_BUF_VMPAGE: return (cb->cb_vm_page_len); case CRYPTO_BUF_UIO: return (cb->cb_uio->uio_resid); default: return (0); } } #ifdef INVARIANTS /* Various sanity checks on crypto requests.
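* These checks are compiled in only when the kernel is built with * INVARIANTS, so they document the request contract rather than * enforce it in production kernels.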
*/ static void cb_sanity(struct crypto_buffer *cb, const char *name) { KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST, ("incoming crp with invalid %s buffer type", name)); switch (cb->cb_type) { case CRYPTO_BUF_CONTIG: KASSERT(cb->cb_buf_len >= 0, ("incoming crp with -ve %s buffer length", name)); break; case CRYPTO_BUF_VMPAGE: KASSERT(CRYPTO_HAS_VMPAGE, ("incoming crp uses dmap on supported arch")); KASSERT(cb->cb_vm_page_len >= 0, ("incoming crp with -ve %s buffer length", name)); KASSERT(cb->cb_vm_page_offset >= 0, ("incoming crp with -ve %s buffer offset", name)); KASSERT(cb->cb_vm_page_offset < PAGE_SIZE, ("incoming crp with %s buffer offset greater than page size" , name)); break; default: break; } } static void crp_sanity(struct cryptop *crp) { struct crypto_session_params *csp; struct crypto_buffer *out; size_t ilen, len, olen; KASSERT(crp->crp_session != NULL, ("incoming crp without a session")); KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE && crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST, ("incoming crp with invalid output buffer type")); KASSERT(crp->crp_etype == 0, ("incoming crp with error")); KASSERT(!(crp->crp_flags & CRYPTO_F_DONE), ("incoming crp already done")); csp = &crp->crp_session->csp; cb_sanity(&crp->crp_buf, "input"); ilen = crypto_buffer_len(&crp->crp_buf); olen = ilen; out = NULL; if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) { if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) { cb_sanity(&crp->crp_obuf, "output"); out = &crp->crp_obuf; olen = crypto_buffer_len(out); } } else KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE, ("incoming crp with separate output buffer " "but no session support")); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS || crp->crp_op == CRYPTO_OP_DECOMPRESS, ("invalid compression op %x", crp->crp_op)); break; case CSP_MODE_CIPHER: KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT || crp->crp_op == CRYPTO_OP_DECRYPT, ("invalid cipher op %x", crp->crp_op)); break; case CSP_MODE_DIGEST: KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST || crp->crp_op == CRYPTO_OP_VERIFY_DIGEST, ("invalid digest op %x", crp->crp_op)); break; case CSP_MODE_AEAD: KASSERT(crp->crp_op == (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || crp->crp_op == (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), ("invalid AEAD op %x", crp->crp_op)); if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, ("GCM without a separate IV")); if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16) KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, ("CCM without a separate IV")); break; case CSP_MODE_ETA: KASSERT(crp->crp_op == (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || crp->crp_op == (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), ("invalid ETA op %x", crp->crp_op)); break; } if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { if (crp->crp_aad == NULL) { KASSERT(crp->crp_aad_start == 0 || crp->crp_aad_start < ilen, ("invalid AAD start")); KASSERT(crp->crp_aad_length != 0 || crp->crp_aad_start == 0, ("AAD with zero length and non-zero start")); KASSERT(crp->crp_aad_length == 0 || crp->crp_aad_start + crp->crp_aad_length <= ilen, ("AAD outside input length")); } else { KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD, ("session doesn't support separate AAD buffer")); KASSERT(crp->crp_aad_start == 0, ("separate AAD buffer with non-zero AAD start")); KASSERT(crp->crp_aad_length != 0, ("separate AAD buffer with zero length")); } } else { KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 
&& crp->crp_aad_length == 0, ("AAD region in request not supporting AAD")); } if (csp->csp_ivlen == 0) { KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0, ("IV_SEPARATE set when IV isn't used")); KASSERT(crp->crp_iv_start == 0, ("crp_iv_start set when IV isn't used")); } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) { KASSERT(crp->crp_iv_start == 0, ("IV_SEPARATE used with non-zero IV start")); } else { KASSERT(crp->crp_iv_start < ilen, ("invalid IV start")); KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen, ("IV outside buffer length")); } /* XXX: payload_start of 0 should always be < ilen? */ KASSERT(crp->crp_payload_start == 0 || crp->crp_payload_start < ilen, ("invalid payload start")); KASSERT(crp->crp_payload_start + crp->crp_payload_length <= ilen, ("payload outside input buffer")); if (out == NULL) { KASSERT(crp->crp_payload_output_start == 0, ("payload output start non-zero without output buffer")); } else { KASSERT(crp->crp_payload_output_start < olen, ("invalid payload output start")); KASSERT(crp->crp_payload_output_start + crp->crp_payload_length <= olen, ("payload outside output buffer")); } if (csp->csp_mode == CSP_MODE_DIGEST || csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) len = ilen; else len = olen; KASSERT(crp->crp_digest_start == 0 || crp->crp_digest_start < len, ("invalid digest start")); /* XXX: For the mlen == 0 case this check isn't perfect. */ KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len, ("digest outside buffer")); } else { KASSERT(crp->crp_digest_start == 0, ("non-zero digest start for request without a digest")); } if (csp->csp_cipher_klen != 0) KASSERT(csp->csp_cipher_key != NULL || crp->crp_cipher_key != NULL, ("cipher request without a key")); if (csp->csp_auth_klen != 0) KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL, ("auth request without a key")); KASSERT(crp->crp_callback != NULL, ("incoming crp without callback")); } #endif -/* - * Add a crypto request to a queue, to be processed by the kernel thread. - */ -int -crypto_dispatch(struct cryptop *crp) +static int +crypto_dispatch_one(struct cryptop *crp, int hint) { struct cryptocap *cap; int result; #ifdef INVARIANTS crp_sanity(crp); #endif - CRYPTOSTAT_INC(cs_ops); crp->crp_retw_id = crp->crp_session->id % crypto_workers_num; - if (CRYPTOP_ASYNC(crp)) { - if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) { - struct crypto_ret_worker *ret_worker; + /* + * Caller marked the request to be processed immediately; dispatch it + * directly to the driver unless the driver is currently blocked, in + * which case it is queued for deferred dispatch. + */ + cap = crp->crp_session->cap; + if (!atomic_load_int(&cap->cc_qblocked)) { + result = crypto_invoke(cap, crp, hint); + if (result != ERESTART) + return (result); - ret_worker = CRYPTO_RETW(crp->crp_retw_id); + /* + * The driver ran out of resources, put the request on the + * queue. 
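+	 * The dispatch kthread retries it once the driver signals
+	 * available capacity through crypto_unblock().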
+ */ + } + crypto_batch_enqueue(crp); + return (0); +} - CRYPTO_RETW_LOCK(ret_worker); - crp->crp_seq = ret_worker->reorder_ops++; - CRYPTO_RETW_UNLOCK(ret_worker); - } +int +crypto_dispatch(struct cryptop *crp) +{ + return (crypto_dispatch_one(crp, 0)); +} - TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp); - taskqueue_enqueue(crypto_tq, &crp->crp_task); - return (0); - } +int +crypto_dispatch_async(struct cryptop *crp, int flags) +{ + struct crypto_ret_worker *ret_worker; - if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) { + if (!CRYPTO_SESS_SYNC(crp->crp_session)) { /* - * Caller marked the request to be processed - * immediately; dispatch it directly to the - * driver unless the driver is currently blocked. + * The driver issues completions asynchronously, don't bother + * deferring dispatch to a worker thread. */ - cap = crp->crp_session->cap; - if (!cap->cc_qblocked) { - result = crypto_invoke(cap, crp, 0); - if (result != ERESTART) - return (result); - /* - * The driver ran out of resources, put the request on - * the queue. - */ - } + return (crypto_dispatch(crp)); } - crypto_batch_enqueue(crp); - return 0; + +#ifdef INVARIANTS + crp_sanity(crp); +#endif + CRYPTOSTAT_INC(cs_ops); + + crp->crp_retw_id = crp->crp_session->id % crypto_workers_num; + if ((flags & CRYPTO_ASYNC_ORDERED) != 0) { + crp->crp_flags |= CRYPTO_F_ASYNC_ORDERED; + ret_worker = CRYPTO_RETW(crp->crp_retw_id); + CRYPTO_RETW_LOCK(ret_worker); + crp->crp_seq = ret_worker->reorder_ops++; + CRYPTO_RETW_UNLOCK(ret_worker); + } + TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp); + taskqueue_enqueue(crypto_tq, &crp->crp_task); + return (0); } void +crypto_dispatch_batch(struct cryptopq *crpq, int flags) +{ + struct cryptop *crp; + int hint; + + while ((crp = TAILQ_FIRST(crpq)) != NULL) { + hint = TAILQ_NEXT(crp, crp_next) != NULL ? CRYPTO_HINT_MORE : 0; + TAILQ_REMOVE(crpq, crp, crp_next); + if (crypto_dispatch_one(crp, hint) != 0) + crypto_batch_enqueue(crp); + } +} + +static void crypto_batch_enqueue(struct cryptop *crp) { CRYPTO_Q_LOCK(); TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); if (crp_sleep) wakeup_one(&crp_q); CRYPTO_Q_UNLOCK(); } /* * Add an asymmetric crypto request to a queue, * to be processed by the kernel thread. */ int crypto_kdispatch(struct cryptkop *krp) { int error; CRYPTOSTAT_INC(cs_kops); krp->krp_cap = NULL; error = crypto_kinvoke(krp); if (error == ERESTART) { CRYPTO_Q_LOCK(); TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); if (crp_sleep) wakeup_one(&crp_q); CRYPTO_Q_UNLOCK(); error = 0; } return error; } /* * Verify a driver is suitable for the specified operation. */ static __inline int kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp) { return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0; } /* * Select a driver for an asym operation. The driver must * support the necessary algorithm. The caller can constrain * which device is selected with the flags parameter. The * algorithm we use here is pretty stupid; just use the first * driver that supports the algorithms we need. If there are * multiple suitable drivers we choose the driver with the * fewest active operations. We prefer hardware-backed * drivers to software ones when either may be used. */ static struct cryptocap * crypto_select_kdriver(const struct cryptkop *krp, int flags) { struct cryptocap *cap, *best; int match, hid; CRYPTO_DRIVER_ASSERT(); /* * Look first for hardware crypto devices if permitted.
*/ if (flags & CRYPTOCAP_F_HARDWARE) match = CRYPTOCAP_F_HARDWARE; else match = CRYPTOCAP_F_SOFTWARE; best = NULL; again: for (hid = 0; hid < crypto_drivers_size; hid++) { /* * If there is no driver for this slot, or the driver * is not appropriate (hardware or software based on * match), then skip. */ cap = crypto_drivers[hid]; if (cap == NULL || (cap->cc_flags & match) == 0) continue; /* verify all the algorithms are supported. */ if (kdriver_suitable(cap, krp)) { if (best == NULL || cap->cc_koperations < best->cc_koperations) best = cap; } } if (best != NULL) return best; if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { /* sort of an Algol 68-style for loop */ match = CRYPTOCAP_F_SOFTWARE; goto again; } return best; } /* * Choose a driver for an asymmetric crypto request. */ static struct cryptocap * crypto_lookup_kdriver(struct cryptkop *krp) { struct cryptocap *cap; uint32_t crid; /* If this request is requeued, it might already have a driver. */ cap = krp->krp_cap; if (cap != NULL) return (cap); /* Use krp_crid to choose a driver. */ crid = krp->krp_crid; if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { cap = crypto_checkdriver(crid); if (cap != NULL) { /* * Driver present, it must support the * necessary algorithm and, if s/w drivers are * excluded, it must be registered as * hardware-backed. */ if (!kdriver_suitable(cap, krp) || (!crypto_devallowsoft && (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0)) cap = NULL; } } else { /* * No requested driver; select based on crid flags. */ if (!crypto_devallowsoft) /* NB: disallow s/w drivers */ crid &= ~CRYPTOCAP_F_SOFTWARE; cap = crypto_select_kdriver(krp, crid); } if (cap != NULL) { krp->krp_cap = cap_ref(cap); krp->krp_hid = cap->cc_hid; } return (cap); } /* * Dispatch an asymmetric crypto request. */ static int crypto_kinvoke(struct cryptkop *krp) { struct cryptocap *cap = NULL; int error; KASSERT(krp != NULL, ("%s: krp == NULL", __func__)); KASSERT(krp->krp_callback != NULL, ("%s: krp->crp_callback == NULL", __func__)); CRYPTO_DRIVER_LOCK(); cap = crypto_lookup_kdriver(krp); if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); krp->krp_status = ENODEV; crypto_kdone(krp); return (0); } /* * If the device is blocked, return ERESTART to requeue it. */ if (cap->cc_kqblocked) { /* * XXX: Previously this set krp_status to ERESTART and * invoked crypto_kdone but the caller would still * requeue it. */ CRYPTO_DRIVER_UNLOCK(); return (ERESTART); } cap->cc_koperations++; CRYPTO_DRIVER_UNLOCK(); error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0); if (error == ERESTART) { CRYPTO_DRIVER_LOCK(); cap->cc_koperations--; CRYPTO_DRIVER_UNLOCK(); return (error); } KASSERT(error == 0, ("error %d returned from crypto_kprocess", error)); return (0); } static void crypto_task_invoke(void *ctx, int pending) { struct cryptocap *cap; struct cryptop *crp; int result; crp = (struct cryptop *)ctx; cap = crp->crp_session->cap; result = crypto_invoke(cap, crp, 0); if (result == ERESTART) crypto_batch_enqueue(crp); } /* * Dispatch a crypto request to the appropriate crypto devices. 
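* By convention the driver returns 0 if it accepted the request (any * completion, including an error recorded in crp_etype, is then * reported through crypto_done()), and ERESTART if it is temporarily * out of resources, in which case the caller requeues the request and * marks the driver blocked.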
*/ static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint) { KASSERT(crp != NULL, ("%s: crp == NULL", __func__)); KASSERT(crp->crp_callback != NULL, ("%s: crp->crp_callback == NULL", __func__)); KASSERT(crp->crp_session != NULL, ("%s: crp->crp_session == NULL", __func__)); if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { struct crypto_session_params csp; crypto_session_t nses; /* * Driver has unregistered; migrate the session and return * an error to the caller so they'll resubmit the op. * * XXX: What if there are more already queued requests for this * session? * * XXX: Real solution is to make sessions refcounted * and force callers to hold a reference when * assigning to crp_session. Could maybe change * crypto_getreq to accept a session pointer to make * that work. Alternatively, we could abandon the * notion of rewriting crp_session in requests forcing * the caller to deal with allocating a new session. * Perhaps provide a method to allow a crp's session to * be swapped that callers could use. */ csp = crp->crp_session->csp; crypto_freesession(crp->crp_session); /* * XXX: Key pointers may no longer be valid. If we * really want to support this we need to define the * KPI such that 'csp' is required to be valid for the * duration of a session by the caller perhaps. * * XXX: If the keys have been changed this will reuse * the old keys. This probably suggests making * rekeying more explicit and updating the key * pointers in 'csp' when the keys change. */ if (crypto_newsession(&nses, &csp, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0) crp->crp_session = nses; crp->crp_etype = EAGAIN; crypto_done(crp); return 0; } else { /* * Invoke the driver to process the request. */ return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint); } } void crypto_destroyreq(struct cryptop *crp) { #ifdef DIAGNOSTIC { struct cryptop *crp2; struct crypto_ret_worker *ret_worker; CRYPTO_Q_LOCK(); TAILQ_FOREACH(crp2, &crp_q, crp_next) { KASSERT(crp2 != crp, ("Freeing cryptop from the crypto queue (%p).", crp)); } CRYPTO_Q_UNLOCK(); FOREACH_CRYPTO_RETW(ret_worker) { CRYPTO_RETW_LOCK(ret_worker); TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) { KASSERT(crp2 != crp, ("Freeing cryptop from the return queue (%p).", crp)); } CRYPTO_RETW_UNLOCK(ret_worker); } } #endif } void crypto_freereq(struct cryptop *crp) { if (crp == NULL) return; crypto_destroyreq(crp); uma_zfree(cryptop_zone, crp); } static void _crypto_initreq(struct cryptop *crp, crypto_session_t cses) { crp->crp_session = cses; } void crypto_initreq(struct cryptop *crp, crypto_session_t cses) { memset(crp, 0, sizeof(*crp)); _crypto_initreq(crp, cses); } struct cryptop * crypto_getreq(crypto_session_t cses, int how) { struct cryptop *crp; MPASS(how == M_WAITOK || how == M_NOWAIT); crp = uma_zalloc(cryptop_zone, how | M_ZERO); if (crp != NULL) _crypto_initreq(crp, cses); return (crp); } /* * Invoke the callback on behalf of the driver. */ void crypto_done(struct cryptop *crp) { KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0, ("crypto_done: op already done, flags 0x%x", crp->crp_flags)); crp->crp_flags |= CRYPTO_F_DONE; if (crp->crp_etype != 0) CRYPTOSTAT_INC(cs_errs); /* * CBIMM means unconditionally do the callback immediately; * CBIFSYNC means do the callback immediately only if the * operation was done synchronously. Both are used to avoid * doing extraneous context switches; the latter is mostly * used with the software crypto driver. 
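* For example, a request with CRYPTO_F_CBIFSYNC set completes in the * caller's context when the session is backed by a synchronous * (software) driver, while the same request on a hardware session is * handed to a return worker unless CRYPTO_F_CBIMM is also set.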
*/ - if (!CRYPTOP_ASYNC_KEEPORDER(crp) && - ((crp->crp_flags & CRYPTO_F_CBIMM) || - ((crp->crp_flags & CRYPTO_F_CBIFSYNC) && - (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) { + if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) == 0 && + ((crp->crp_flags & CRYPTO_F_CBIMM) != 0 || + ((crp->crp_flags & CRYPTO_F_CBIFSYNC) != 0 && + CRYPTO_SESS_SYNC(crp->crp_session)))) { /* * Do the callback directly. This is ok when the * callback routine does very little (e.g. the * /dev/crypto callback method just does a wakeup). */ crp->crp_callback(crp); } else { struct crypto_ret_worker *ret_worker; bool wake; ret_worker = CRYPTO_RETW(crp->crp_retw_id); - wake = false; /* * Normal case; queue the callback for the thread. */ CRYPTO_RETW_LOCK(ret_worker); - if (CRYPTOP_ASYNC_KEEPORDER(crp)) { + if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) != 0) { struct cryptop *tmp; - TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q, - cryptop_q, crp_next) { + TAILQ_FOREACH_REVERSE(tmp, + &ret_worker->crp_ordered_ret_q, cryptop_q, + crp_next) { if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) { - TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q, - tmp, crp, crp_next); + TAILQ_INSERT_AFTER( + &ret_worker->crp_ordered_ret_q, tmp, + crp, crp_next); break; } } if (tmp == NULL) { - TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q, - crp, crp_next); + TAILQ_INSERT_HEAD( + &ret_worker->crp_ordered_ret_q, crp, + crp_next); } - if (crp->crp_seq == ret_worker->reorder_cur_seq) - wake = true; - } - else { - if (CRYPTO_RETW_EMPTY(ret_worker)) - wake = true; - - TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next); + wake = crp->crp_seq == ret_worker->reorder_cur_seq; + } else { + wake = TAILQ_EMPTY(&ret_worker->crp_ret_q); + TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, + crp_next); } if (wake) wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ CRYPTO_RETW_UNLOCK(ret_worker); } } /* * Invoke the callback on behalf of the driver. */ void crypto_kdone(struct cryptkop *krp) { struct crypto_ret_worker *ret_worker; struct cryptocap *cap; if (krp->krp_status != 0) CRYPTOSTAT_INC(cs_kerrs); cap = krp->krp_cap; if (cap != NULL) { CRYPTO_DRIVER_LOCK(); KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0")); cap->cc_koperations--; if (cap->cc_koperations == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) wakeup(cap); CRYPTO_DRIVER_UNLOCK(); krp->krp_cap = NULL; cap_rele(cap); } ret_worker = CRYPTO_RETW(0); CRYPTO_RETW_LOCK(ret_worker); - if (CRYPTO_RETW_EMPTY(ret_worker)) + if (TAILQ_EMPTY(&ret_worker->crp_ret_kq)) wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next); CRYPTO_RETW_UNLOCK(ret_worker); } int crypto_getfeat(int *featp) { int hid, kalg, feat = 0; CRYPTO_DRIVER_LOCK(); for (hid = 0; hid < crypto_drivers_size; hid++) { const struct cryptocap *cap = crypto_drivers[hid]; if (cap == NULL || ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && !crypto_devallowsoft)) { continue; } for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED) feat |= 1 << kalg; } CRYPTO_DRIVER_UNLOCK(); *featp = feat; return (0); } /* * Terminate a thread at module unload. The process that * initiated this is waiting for us to signal that we're gone; * wake it up and exit. We use the driver table lock to insure * we don't do the wakeup before they're waiting. There is no * race here because the waiter sleeps on the proc lock for the * thread so it gets notified at the right time because of an * extra wakeup that's done in exit1(). 
*/ static void crypto_finis(void *chan) { CRYPTO_DRIVER_LOCK(); wakeup_one(chan); CRYPTO_DRIVER_UNLOCK(); kproc_exit(0); } /* * Crypto thread, dispatches crypto requests. */ static void crypto_proc(void) { struct cryptop *crp, *submit; struct cryptkop *krp; struct cryptocap *cap; int result, hint; #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) fpu_kern_thread(FPU_KERN_NORMAL); #endif CRYPTO_Q_LOCK(); for (;;) { /* * Find the first element in the queue that can be * processed and look-ahead to see if multiple ops * are ready for the same driver. */ submit = NULL; hint = 0; TAILQ_FOREACH(crp, &crp_q, crp_next) { cap = crp->crp_session->cap; /* * Driver cannot disappear while there is an active * session. */ KASSERT(cap != NULL, ("%s:%u Driver disappeared.", __func__, __LINE__)); if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { /* Op needs to be migrated, process it. */ if (submit == NULL) submit = crp; break; } if (!cap->cc_qblocked) { if (submit != NULL) { /* * We stop on finding another op, * regardless of whether it's for the same * driver or not. We could keep * searching the queue but it might be * better to just use a per-driver * queue instead. */ if (submit->crp_session->cap == cap) hint = CRYPTO_HINT_MORE; - break; } else { submit = crp; - if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) - break; - /* keep scanning for more are q'd */ } + break; } } if (submit != NULL) { TAILQ_REMOVE(&crp_q, submit, crp_next); cap = submit->crp_session->cap; KASSERT(cap != NULL, ("%s:%u Driver disappeared.", __func__, __LINE__)); CRYPTO_Q_UNLOCK(); result = crypto_invoke(cap, submit, hint); CRYPTO_Q_LOCK(); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptop's and put * the request back in the queue. It would * be best to put the request back where we got * it but that's hard so for now we put it * at the front. This should be ok; putting * it at the end does not work. */ cap->cc_qblocked = 1; TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); CRYPTOSTAT_INC(cs_blocks); } } /* As above, but for key ops */ TAILQ_FOREACH(krp, &crp_kq, krp_next) { cap = krp->krp_cap; if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { /* * Operation needs to be migrated, * clear krp_cap so a new driver is * selected. */ krp->krp_cap = NULL; cap_rele(cap); break; } if (!cap->cc_kqblocked) break; } if (krp != NULL) { TAILQ_REMOVE(&crp_kq, krp, krp_next); CRYPTO_Q_UNLOCK(); result = crypto_kinvoke(krp); CRYPTO_Q_LOCK(); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptkop's and put * the request back in the queue. It would * be best to put the request back where we got * it but that's hard so for now we put it * at the front. This should be ok; putting * it at the end does not work. */ krp->krp_cap->cc_kqblocked = 1; TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); CRYPTOSTAT_INC(cs_kblocks); } } if (submit == NULL && krp == NULL) { /* * Nothing more to be processed. Sleep until we're * woken because there are more ops to process. * This happens either by submission or by a driver * becoming unblocked and notifying us through * crypto_unblock. Note that when we wake up we * start processing each queue again from the * front. It's not clear that it's important to * preserve this ordering since ops may finish * out of order if dispatched to different devices * and some become blocked while others do not.
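* (Producers such as crypto_batch_enqueue() and crypto_unblock() only * call wakeup_one() while crp_sleep is set, so raising the flag below * is what arms those wakeups.)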
*/ crp_sleep = 1; msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); crp_sleep = 0; if (cryptoproc == NULL) break; CRYPTOSTAT_INC(cs_intrs); } } CRYPTO_Q_UNLOCK(); crypto_finis(&crp_q); } /* * Crypto returns thread, does callbacks for processed crypto requests. * Callbacks are done here, rather than in the crypto drivers, because * callbacks typically are expensive and would slow interrupt handling. */ static void crypto_ret_proc(struct crypto_ret_worker *ret_worker) { struct cryptop *crpt; struct cryptkop *krpt; CRYPTO_RETW_LOCK(ret_worker); for (;;) { /* Harvest return q's for completed ops */ crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q); if (crpt != NULL) { if (crpt->crp_seq == ret_worker->reorder_cur_seq) { TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next); ret_worker->reorder_cur_seq++; } else { crpt = NULL; } } if (crpt == NULL) { crpt = TAILQ_FIRST(&ret_worker->crp_ret_q); if (crpt != NULL) TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next); } krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq); if (krpt != NULL) TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next); if (crpt != NULL || krpt != NULL) { CRYPTO_RETW_UNLOCK(ret_worker); /* * Run callbacks unlocked. */ if (crpt != NULL) crpt->crp_callback(crpt); if (krpt != NULL) krpt->krp_callback(krpt); CRYPTO_RETW_LOCK(ret_worker); } else { /* * Nothing more to be processed. Sleep until we're * woken because there are more returns to process. */ msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT, "crypto_ret_wait", 0); if (ret_worker->cryptoretproc == NULL) break; CRYPTOSTAT_INC(cs_rets); } } CRYPTO_RETW_UNLOCK(ret_worker); crypto_finis(&ret_worker->crp_ret_q); } #ifdef DDB static void db_show_drivers(void) { int hid; db_printf("%12s %4s %4s %8s %2s %2s\n" , "Device" , "Ses" , "Kops" , "Flags" , "QB" , "KB" ); for (hid = 0; hid < crypto_drivers_size; hid++) { const struct cryptocap *cap = crypto_drivers[hid]; if (cap == NULL) continue; db_printf("%-12s %4u %4u %08x %2u %2u\n" , device_get_nameunit(cap->cc_dev) , cap->cc_sessions , cap->cc_koperations , cap->cc_flags , cap->cc_qblocked , cap->cc_kqblocked ); } } DB_SHOW_COMMAND(crypto, db_show_crypto) { struct cryptop *crp; struct crypto_ret_worker *ret_worker; db_show_drivers(); db_printf("\n"); db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", "Device", "Callback"); TAILQ_FOREACH(crp, &crp_q, crp_next) { db_printf("%4u %08x %4u %4u %04x %8p %8p\n" , crp->crp_session->cap->cc_hid , (int) crypto_ses2caps(crp->crp_session) , crp->crp_olen , crp->crp_etype , crp->crp_flags , device_get_nameunit(crp->crp_session->cap->cc_dev) , crp->crp_callback ); } FOREACH_CRYPTO_RETW(ret_worker) { db_printf("\n%8s %4s %4s %4s %8s\n", "ret_worker", "HID", "Etype", "Flags", "Callback"); if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { db_printf("%8td %4u %4u %04x %8p\n" , CRYPTO_RETW_ID(ret_worker) , crp->crp_session->cap->cc_hid , crp->crp_etype , crp->crp_flags , crp->crp_callback ); } } } } DB_SHOW_COMMAND(kcrypto, db_show_kcrypto) { struct cryptkop *krp; struct crypto_ret_worker *ret_worker; db_show_drivers(); db_printf("\n"); db_printf("%4s %5s %4s %4s %8s %4s %8s\n", "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); TAILQ_FOREACH(krp, &crp_kq, krp_next) { db_printf("%4u %5u %4u %4u %08x %4u %8p\n" , krp->krp_op , krp->krp_status , krp->krp_iparams, krp->krp_oparams , krp->krp_crid, krp->krp_hid , krp->krp_callback ); } ret_worker = CRYPTO_RETW(0); if 
(!TAILQ_EMPTY(&ret_worker->crp_ret_kq)) { db_printf("%4s %5s %8s %4s %8s\n", "Op", "Status", "CRID", "HID", "Callback"); TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) { db_printf("%4u %5u %08x %4u %8p\n" , krp->krp_op , krp->krp_status , krp->krp_crid, krp->krp_hid , krp->krp_callback ); } } } #endif int crypto_modevent(module_t mod, int type, void *unused); /* * Initialization code, both for static and dynamic loading. * Note this is not invoked with the usual DECLARE_MODULE * mechanism but instead is listed as a dependency by the * cryptosoft driver. This guarantees proper ordering of * calls on module load/unload. */ int crypto_modevent(module_t mod, int type, void *unused) { int error = EINVAL; switch (type) { case MOD_LOAD: error = crypto_init(); if (error == 0 && bootverbose) printf("crypto: <crypto core>\n"); break; case MOD_UNLOAD: /*XXX disallow if active sessions */ error = 0; crypto_destroy(); return 0; } return error; } MODULE_VERSION(crypto, 1); MODULE_DEPEND(crypto, zlib, 1, 1, 1); diff --git a/sys/opencrypto/cryptodev.h b/sys/opencrypto/cryptodev.h index ecb1d929d1db..659599cb7d60 100644 --- a/sys/opencrypto/cryptodev.h +++ b/sys/opencrypto/cryptodev.h @@ -1,728 +1,721 @@ /* $FreeBSD$ */ /* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000 Angelos D. Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. * * Copyright (c) 2001 Theo de Raadt * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. * */ #ifndef _CRYPTO_CRYPTO_H_ #define _CRYPTO_CRYPTO_H_ #include #ifdef _KERNEL #include #include #endif /* Some initial values */ #define CRYPTO_DRIVERS_INITIAL 4 /* Hash values */ #define NULL_HASH_LEN 16 #define SHA1_HASH_LEN 20 #define RIPEMD160_HASH_LEN 20 #define SHA2_224_HASH_LEN 28 #define SHA2_256_HASH_LEN 32 #define SHA2_384_HASH_LEN 48 #define SHA2_512_HASH_LEN 64 #define AES_GMAC_HASH_LEN 16 #define POLY1305_HASH_LEN 16 #define AES_CBC_MAC_HASH_LEN 16 /* Maximum hash algorithm result length */ #define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */ #define SHA1_BLOCK_LEN 64 #define RIPEMD160_BLOCK_LEN 64 #define SHA2_224_BLOCK_LEN 64 #define SHA2_256_BLOCK_LEN 64 #define SHA2_384_BLOCK_LEN 128 #define SHA2_512_BLOCK_LEN 128 /* HMAC values */ #define NULL_HMAC_BLOCK_LEN 64 /* Maximum HMAC block length */ #define HMAC_MAX_BLOCK_LEN SHA2_512_BLOCK_LEN /* Keep this updated */ #define HMAC_IPAD_VAL 0x36 #define HMAC_OPAD_VAL 0x5C /* HMAC Key Length */ #define AES_128_GMAC_KEY_LEN 16 #define AES_192_GMAC_KEY_LEN 24 #define AES_256_GMAC_KEY_LEN 32 #define AES_128_CBC_MAC_KEY_LEN 16 #define AES_192_CBC_MAC_KEY_LEN 24 #define AES_256_CBC_MAC_KEY_LEN 32 #define POLY1305_KEY_LEN 32 /* Encryption algorithm block sizes */ #define NULL_BLOCK_LEN 4 /* IPsec to maintain alignment */ #define RIJNDAEL128_BLOCK_LEN 16 #define AES_BLOCK_LEN 16 #define AES_ICM_BLOCK_LEN 1 #define CAMELLIA_BLOCK_LEN 16 #define CHACHA20_NATIVE_BLOCK_LEN 64 #define EALG_MAX_BLOCK_LEN CHACHA20_NATIVE_BLOCK_LEN /* Keep this updated */ /* IV Lengths */ #define AES_GCM_IV_LEN 12 #define AES_CCM_IV_LEN 12 #define AES_XTS_IV_LEN 8 #define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */ /* Min and Max Encryption Key Sizes */ #define NULL_MIN_KEY 0 #define NULL_MAX_KEY 256 /* 2048 bits, max key */ #define RIJNDAEL_MIN_KEY 16 #define RIJNDAEL_MAX_KEY 32 #define AES_MIN_KEY RIJNDAEL_MIN_KEY #define AES_MAX_KEY RIJNDAEL_MAX_KEY #define AES_XTS_MIN_KEY (2 * AES_MIN_KEY) #define AES_XTS_MAX_KEY (2 * AES_MAX_KEY) #define CAMELLIA_MIN_KEY 16 #define CAMELLIA_MAX_KEY 32 /* Maximum hash algorithm result length */ #define AALG_MAX_RESULT_LEN 64 /* Keep this updated */ #define CRYPTO_ALGORITHM_MIN 1 #define CRYPTO_DES_CBC 1 #define CRYPTO_3DES_CBC 2 #define CRYPTO_BLF_CBC 3 #define CRYPTO_CAST_CBC 4 #define CRYPTO_SKIPJACK_CBC 5 #define CRYPTO_MD5_HMAC 6 #define CRYPTO_SHA1_HMAC 7 #define CRYPTO_RIPEMD160_HMAC 8 #define CRYPTO_MD5_KPDK 9 #define CRYPTO_SHA1_KPDK 10 #define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */ #define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */ #define CRYPTO_ARC4 12 #define CRYPTO_MD5 13 #define CRYPTO_SHA1 14 #define CRYPTO_NULL_HMAC 15 #define CRYPTO_NULL_CBC 16 #define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */ #define CRYPTO_SHA2_256_HMAC 18 #define 
#define CRYPTO_SHA2_384_HMAC	19
#define CRYPTO_SHA2_512_HMAC	20
#define CRYPTO_CAMELLIA_CBC	21
#define CRYPTO_AES_XTS		22
#define CRYPTO_AES_ICM		23	/* commonly known as CTR mode */
#define CRYPTO_AES_NIST_GMAC	24	/* GMAC only */
#define CRYPTO_AES_NIST_GCM_16	25	/* 16 byte ICV */
#ifdef _KERNEL
#define CRYPTO_AES_128_NIST_GMAC 26	/* auth side */
#define CRYPTO_AES_192_NIST_GMAC 27	/* auth side */
#define CRYPTO_AES_256_NIST_GMAC 28	/* auth side */
#endif
#define CRYPTO_BLAKE2B		29	/* Blake2b hash */
#define CRYPTO_BLAKE2S		30	/* Blake2s hash */
#define CRYPTO_CHACHA20		31	/* Chacha20 stream cipher */
#define CRYPTO_SHA2_224_HMAC	32
#define CRYPTO_RIPEMD160	33
#define CRYPTO_SHA2_224		34
#define CRYPTO_SHA2_256		35
#define CRYPTO_SHA2_384		36
#define CRYPTO_SHA2_512		37
#define CRYPTO_POLY1305		38
#define CRYPTO_AES_CCM_CBC_MAC	39	/* auth side */
#define CRYPTO_AES_CCM_16	40	/* cipher side */
#define CRYPTO_ALGORITHM_MAX	40	/* Keep updated - see below */

#define CRYPTO_ALGO_VALID(x)	((x) >= CRYPTO_ALGORITHM_MIN && \
				 (x) <= CRYPTO_ALGORITHM_MAX)

/* Algorithm flags */
#define CRYPTO_ALG_FLAG_SUPPORTED	0x01 /* Algorithm is supported */
#define CRYPTO_ALG_FLAG_RNG_ENABLE	0x02 /* Has HW RNG for DH/DSA */
#define CRYPTO_ALG_FLAG_DSA_SHA		0x04 /* Can do SHA on msg */

/*
 * Crypto driver/device flags.  They can be set in the crid
 * parameter when creating a session or submitting a key
 * op to affect the device/driver assigned.  If neither
 * of these are specified then the crid is assumed to hold
 * the driver id of an existing (and suitable) device that
 * must be used to satisfy the request.
 */
#define CRYPTO_FLAG_HARDWARE	0x01000000	/* hardware accelerated */
#define CRYPTO_FLAG_SOFTWARE	0x02000000	/* software implementation */

/* Does the kernel support vmpage buffers on this platform? */
#ifdef __powerpc__
#define CRYPTO_MAY_HAVE_VMPAGE	1
#else
#define CRYPTO_MAY_HAVE_VMPAGE	( PMAP_HAS_DMAP )
#endif
/* Does the currently running system support vmpage buffers on this platform? */
#define CRYPTO_HAS_VMPAGE	( PMAP_HAS_DMAP )

/* NB: deprecated */
struct session_op {
	uint32_t	cipher;		/* ie. CRYPTO_AES_CBC */
	uint32_t	mac;		/* ie. CRYPTO_SHA2_256_HMAC */
	uint32_t	keylen;		/* cipher key */
	const void	*key;
	int		mackeylen;	/* mac key */
	const void	*mackey;
	uint32_t	ses;		/* returns: session # */
};

/*
 * session and crypt _op structs are used by userspace programs to interact
 * with /dev/crypto.  Confusingly, the internal kernel interface is named
 * "cryptop" (no underscore).
 */
struct session2_op {
	uint32_t	cipher;		/* ie. CRYPTO_AES_CBC */
	uint32_t	mac;		/* ie. CRYPTO_SHA2_256_HMAC */
	uint32_t	keylen;		/* cipher key */
	const void	*key;
	int		mackeylen;	/* mac key */
	const void	*mackey;
	uint32_t	ses;		/* returns: session # */
	int		crid;		/* driver id + flags (rw) */
	int		pad[4];		/* for future expansion */
};

struct crypt_op {
	uint32_t	ses;
	uint16_t	op;		/* i.e. COP_ENCRYPT */
#define COP_ENCRYPT	1
#define COP_DECRYPT	2
	uint16_t	flags;
#define COP_F_CIPHER_FIRST	0x0001	/* Cipher before MAC. */
#define COP_F_BATCH		0x0008	/* Batch op if possible */
	u_int		len;
	const void	*src;		/* become iov[] inside kernel */
	void		*dst;
	void		*mac;		/* must be big enough for chosen MAC */
	const void	*iv;
};

/* op and flags the same as crypt_op */
struct crypt_aead {
	uint32_t	ses;
	uint16_t	op;		/* i.e. COP_ENCRYPT */
	uint16_t	flags;
	u_int		len;
	u_int		aadlen;
	u_int		ivlen;
	const void	*src;		/* become iov[] inside kernel */
	void		*dst;
	const void	*aad;		/* additional authenticated data */
	void		*tag;		/* must fit for chosen TAG length */
	const void	*iv;
};
/*
 * Parameters for looking up a crypto driver/device by
 * device name or by id.  The latter are returned for
 * created sessions (crid) and completed key operations.
 */
struct crypt_find_op {
	int		crid;		/* driver id + flags */
	char		name[32];	/* device/driver name */
};

/* bignum parameter, in packed bytes, ... */
struct crparam {
	void		*crp_p;
	u_int		crp_nbits;
};

#define CRK_MAXPARAM	8

struct crypt_kop {
	u_int		crk_op;		/* ie. CRK_MOD_EXP or other */
	u_int		crk_status;	/* return status */
	u_short		crk_iparams;	/* # of input parameters */
	u_short		crk_oparams;	/* # of output parameters */
	u_int		crk_crid;	/* NB: only used by CIOCKEY2 (rw) */
	struct crparam	crk_param[CRK_MAXPARAM];
};

#define CRK_ALGORITM_MIN	0
#define CRK_MOD_EXP		0
#define CRK_MOD_EXP_CRT		1
#define CRK_DSA_SIGN		2
#define CRK_DSA_VERIFY		3
#define CRK_DH_COMPUTE_KEY	4
#define CRK_ALGORITHM_MAX	4	/* Keep updated - see below */

#define CRF_MOD_EXP		(1 << CRK_MOD_EXP)
#define CRF_MOD_EXP_CRT		(1 << CRK_MOD_EXP_CRT)
#define CRF_DSA_SIGN		(1 << CRK_DSA_SIGN)
#define CRF_DSA_VERIFY		(1 << CRK_DSA_VERIFY)
#define CRF_DH_COMPUTE_KEY	(1 << CRK_DH_COMPUTE_KEY)

#define CIOCGSESSION	_IOWR('c', 101, struct session_op)
#define CIOCFSESSION	_IOW('c', 102, uint32_t)
#define CIOCCRYPT	_IOWR('c', 103, struct crypt_op)
#define CIOCKEY		_IOWR('c', 104, struct crypt_kop)
#define CIOCASYMFEAT	_IOR('c', 105, uint32_t)
#define CIOCGSESSION2	_IOWR('c', 106, struct session2_op)
#define CIOCKEY2	_IOWR('c', 107, struct crypt_kop)
#define CIOCFINDDEV	_IOWR('c', 108, struct crypt_find_op)
#define CIOCCRYPTAEAD	_IOWR('c', 109, struct crypt_aead)

struct cryptostats {
	uint64_t	cs_ops;		/* symmetric crypto ops submitted */
	uint64_t	cs_errs;	/* symmetric crypto ops that failed */
	uint64_t	cs_kops;	/* asymmetric/key ops submitted */
	uint64_t	cs_kerrs;	/* asymmetric/key ops that failed */
	uint64_t	cs_intrs;	/* crypto swi thread activations */
	uint64_t	cs_rets;	/* crypto return thread activations */
	uint64_t	cs_blocks;	/* symmetric op driver block */
	uint64_t	cs_kblocks;	/* asymmetric op driver block */
};
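As a rough illustration of how the userspace structures and ioctls above fit
together, a caller might create an AES-CBC session and encrypt one block in
place.  This is a minimal sketch, not part of the change: the key, IV, and
buffer are placeholders, and it assumes a FreeBSD where sessions are created
directly on the /dev/crypto descriptor (the old CRIOGET step was removed):

	/* Hypothetical userspace sketch; error paths abbreviated. */
	#include <sys/ioctl.h>
	#include <crypto/cryptodev.h>
	#include <err.h>
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int
	encrypt_once(const char key[16], const char iv[16], char buf[16])
	{
		struct session2_op sop;
		struct crypt_op cop;
		int fd;

		fd = open("/dev/crypto", O_RDWR);
		if (fd < 0)
			err(1, "open");

		/* Create a cipher-only session. */
		memset(&sop, 0, sizeof(sop));
		sop.cipher = CRYPTO_AES_CBC;
		sop.keylen = 16;
		sop.key = key;
		if (ioctl(fd, CIOCGSESSION2, &sop) < 0)
			err(1, "CIOCGSESSION2");

		/* One AES block, encrypted in place (src == dst). */
		memset(&cop, 0, sizeof(cop));
		cop.ses = sop.ses;
		cop.op = COP_ENCRYPT;
		cop.len = 16;
		cop.src = buf;
		cop.dst = buf;
		cop.iv = iv;
		if (ioctl(fd, CIOCCRYPT, &cop) < 0)
			err(1, "CIOCCRYPT");

		ioctl(fd, CIOCFSESSION, &sop.ses);
		close(fd);
		return (0);
	}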
#ifdef _KERNEL

/*
 * Return values for cryptodev_probesession methods.
 */
#define CRYPTODEV_PROBE_HARDWARE	(-100)
#define CRYPTODEV_PROBE_ACCEL_SOFTWARE	(-200)
#define CRYPTODEV_PROBE_SOFTWARE	(-500)

#if 0
#define CRYPTDEB(s, ...) do {						\
	printf("%s:%d: " s "\n", __FILE__, __LINE__, ## __VA_ARGS__);	\
} while (0)
#else
#define CRYPTDEB(...)	do { } while (0)
#endif

struct crypto_session_params {
	int		csp_mode;	/* Type of operations to perform. */

#define CSP_MODE_NONE		0
#define CSP_MODE_COMPRESS	1	/* Compression/decompression. */
#define CSP_MODE_CIPHER		2	/* Encrypt/decrypt. */
#define CSP_MODE_DIGEST		3	/* Compute/verify digest. */
#define CSP_MODE_AEAD		4	/* Combined auth/encryption. */
#define CSP_MODE_ETA		5	/* IPsec style encrypt-then-auth */

	int		csp_flags;

#define CSP_F_SEPARATE_OUTPUT	0x0001	/* Requests can use separate output */
#define CSP_F_SEPARATE_AAD	0x0002	/* Requests can use separate AAD */
#define CSP_F_ESN		0x0004	/* Requests can use separate ESN field */

	int		csp_ivlen;	/* IV length in bytes. */

	int		csp_cipher_alg;
	int		csp_cipher_klen; /* Key length in bytes. */
	const void	*csp_cipher_key;

	int		csp_auth_alg;
	int		csp_auth_klen;	/* Key length in bytes. */
	const void	*csp_auth_key;
	int		csp_auth_mlen;	/* Number of digest bytes to use.
					   0 means all. */
};

enum crypto_buffer_type {
	CRYPTO_BUF_NONE = 0,
	CRYPTO_BUF_CONTIG,
	CRYPTO_BUF_UIO,
	CRYPTO_BUF_MBUF,
	CRYPTO_BUF_VMPAGE,
	CRYPTO_BUF_LAST = CRYPTO_BUF_VMPAGE
};

/*
 * Description of a data buffer for a request.  Requests can either
 * have a single buffer that is modified in place or separate input
 * and output buffers.
 */
struct crypto_buffer {
	union {
		struct {
			char	*cb_buf;
			int	cb_buf_len;
		};
		struct mbuf *cb_mbuf;
		struct {
			vm_page_t *cb_vm_page;
			int	cb_vm_page_len;
			int	cb_vm_page_offset;
		};
		struct uio *cb_uio;
	};
	enum crypto_buffer_type cb_type;
};

/*
 * A cursor is used to iterate through a crypto request data buffer.
 */
struct crypto_buffer_cursor {
	union {
		char *cc_buf;
		struct mbuf *cc_mbuf;
		struct iovec *cc_iov;
		vm_page_t *cc_vmpage;
	};
	/* Optional bytes of valid data remaining */
	int cc_buf_len;
	/*
	 * Optional offset within the current buffer segment where
	 * valid data begins
	 */
	size_t cc_offset;
	enum crypto_buffer_type cc_type;
};

/* Structure describing complete operation */
struct cryptop {
	TAILQ_ENTRY(cryptop) crp_next;

	struct task	crp_task;

	crypto_session_t crp_session;	/* Session */
	int		crp_olen;	/* Result total length */

	int		crp_etype;	/*
					 * Error type (zero means no error).
					 * All error codes except EAGAIN
					 * indicate possible data corruption (as in,
					 * the data have been touched).  On all
					 * errors, the crp_session may have changed
					 * (reset to a new one), so the caller
					 * should always check and use the new
					 * value on future requests.
					 */
	int		crp_flags;

-#define CRYPTO_F_BATCH		0x0008	/* Batch op if possible */
#define CRYPTO_F_CBIMM		0x0010	/* Do callback immediately */
#define CRYPTO_F_DONE		0x0020	/* Operation completed */
#define CRYPTO_F_CBIFSYNC	0x0040	/* Do CBIMM if op is synchronous */
-#define CRYPTO_F_ASYNC		0x0080	/* Dispatch crypto jobs on several threads
-					 * if op is synchronous
-					 */
-#define CRYPTO_F_ASYNC_KEEPORDER 0x0100	/*
-					 * Dispatch the crypto jobs in the same
-					 * order they are submitted.  Applied only
-					 * if CRYPTO_F_ASYNC flag is set
-					 */
+#define CRYPTO_F_ASYNC_ORDERED	0x0100	/* Completions must happen in order */
#define CRYPTO_F_IV_SEPARATE	0x0200	/* Use crp_iv[] as IV. */

	int		crp_op;

	struct crypto_buffer crp_buf;
	struct crypto_buffer crp_obuf;

	void		*crp_aad;	/* AAD buffer. */
	int		crp_aad_start;	/* Location of AAD. */
	int		crp_aad_length;	/* 0 => no AAD. */
	uint8_t		crp_esn[4];	/* high-order ESN */

	int		crp_iv_start;	/* Location of IV.  IV length is from
					 * the session.
					 */
	int		crp_payload_start; /* Location of ciphertext. */
	int		crp_payload_output_start;
	int		crp_payload_length;
	int		crp_digest_start; /* Location of MAC/tag.  Length is
					   * from the session.
					   */

	uint8_t		crp_iv[EALG_MAX_BLOCK_LEN]; /* IV if IV_SEPARATE. */

	const void	*crp_cipher_key; /* New cipher key if non-NULL. */
	const void	*crp_auth_key;	/* New auth key if non-NULL. */

	void		*crp_opaque;	/* Opaque pointer, passed along */
	int (*crp_callback)(struct cryptop *); /* Callback function */

	struct bintime	crp_tstamp;	/* performance time stamp */
	uint32_t	crp_seq;	/* used for ordered dispatch */
	uint32_t	crp_retw_id;	/*
					 * the return worker to be used,
					 * used for ordered dispatch
					 */
};

+TAILQ_HEAD(cryptopq, cryptop);
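The practical effect of the flag change above is that consumers no longer
latch asynchronous, ordered behavior into crp_flags; they request it at
submission time instead.  A minimal before/after sketch (the wrapper function
is hypothetical; the identifiers come from this diff):

	/*
	 * Before this change, a consumer wanting parallel dispatch of a
	 * synchronous driver with in-order completion would have written:
	 *
	 *	crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER;
	 *	error = crypto_dispatch(crp);
	 *
	 * With this change the same intent is expressed at dispatch time:
	 */
	static int
	submit_ordered(struct cryptop *crp)
	{
		/*
		 * CRYPTO_ASYNC_ORDERED asks the framework to complete
		 * requests in the order they were dispatched, which
		 * matters for consumers such as IPsec anti-replay.
		 */
		return (crypto_dispatch_async(crp, CRYPTO_ASYNC_ORDERED));
	}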
static __inline void
_crypto_use_buf(struct crypto_buffer *cb, void *buf, int len)
{
	cb->cb_buf = buf;
	cb->cb_buf_len = len;
	cb->cb_type = CRYPTO_BUF_CONTIG;
}

static __inline void
_crypto_use_mbuf(struct crypto_buffer *cb, struct mbuf *m)
{
	cb->cb_mbuf = m;
	cb->cb_type = CRYPTO_BUF_MBUF;
}

static __inline void
_crypto_use_vmpage(struct crypto_buffer *cb, vm_page_t *pages, int len,
    int offset)
{
	cb->cb_vm_page = pages;
	cb->cb_vm_page_len = len;
	cb->cb_vm_page_offset = offset;
	cb->cb_type = CRYPTO_BUF_VMPAGE;
}

static __inline void
_crypto_use_uio(struct crypto_buffer *cb, struct uio *uio)
{
	cb->cb_uio = uio;
	cb->cb_type = CRYPTO_BUF_UIO;
}

static __inline void
crypto_use_buf(struct cryptop *crp, void *buf, int len)
{
	_crypto_use_buf(&crp->crp_buf, buf, len);
}

static __inline void
crypto_use_mbuf(struct cryptop *crp, struct mbuf *m)
{
	_crypto_use_mbuf(&crp->crp_buf, m);
}

static __inline void
crypto_use_vmpage(struct cryptop *crp, vm_page_t *pages, int len, int offset)
{
	_crypto_use_vmpage(&crp->crp_buf, pages, len, offset);
}

static __inline void
crypto_use_uio(struct cryptop *crp, struct uio *uio)
{
	_crypto_use_uio(&crp->crp_buf, uio);
}

static __inline void
crypto_use_output_buf(struct cryptop *crp, void *buf, int len)
{
	_crypto_use_buf(&crp->crp_obuf, buf, len);
}

static __inline void
crypto_use_output_mbuf(struct cryptop *crp, struct mbuf *m)
{
	_crypto_use_mbuf(&crp->crp_obuf, m);
}

static __inline void
crypto_use_output_vmpage(struct cryptop *crp, vm_page_t *pages, int len,
    int offset)
{
	_crypto_use_vmpage(&crp->crp_obuf, pages, len, offset);
}

static __inline void
crypto_use_output_uio(struct cryptop *crp, struct uio *uio)
{
	_crypto_use_uio(&crp->crp_obuf, uio);
}

-#define CRYPTOP_ASYNC(crp) \
-	(((crp)->crp_flags & CRYPTO_F_ASYNC) && \
-	 crypto_ses2caps((crp)->crp_session) & CRYPTOCAP_F_SYNC)
-#define CRYPTOP_ASYNC_KEEPORDER(crp) \
-	(CRYPTOP_ASYNC(crp) && \
-	 (crp)->crp_flags & CRYPTO_F_ASYNC_KEEPORDER)
#define CRYPTO_HAS_OUTPUT_BUFFER(crp)					\
	((crp)->crp_obuf.cb_type != CRYPTO_BUF_NONE)

/* Flags in crp_op. */
#define CRYPTO_OP_DECRYPT		0x0
#define CRYPTO_OP_ENCRYPT		0x1
#define CRYPTO_OP_IS_ENCRYPT(op)	((op) & CRYPTO_OP_ENCRYPT)
#define CRYPTO_OP_COMPUTE_DIGEST	0x0
#define CRYPTO_OP_VERIFY_DIGEST		0x2
#define CRYPTO_OP_DECOMPRESS		CRYPTO_OP_DECRYPT
#define CRYPTO_OP_COMPRESS		CRYPTO_OP_ENCRYPT
#define CRYPTO_OP_IS_COMPRESS(op)	((op) & CRYPTO_OP_COMPRESS)

/*
 * Hints passed to process methods.
 */
#define CRYPTO_HINT_MORE	0x1	/* more ops coming shortly */
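To illustrate the crypto_use_* helpers above, a kernel consumer with a
contiguous input and a separate contiguous output might prepare a request as
follows.  This is a sketch, not code from the change: the session is assumed
to have been created with CSP_F_SEPARATE_OUTPUT so that crp_obuf is legal,
and the region offsets are the simplest possible case:

	static void
	setup_buffers(struct cryptop *crp, void *in, void *out, int len)
	{
		/* AAD/IV/payload offsets are relative to crp_buf... */
		crypto_use_buf(crp, in, len);
		/* ...while cipher output lands in crp_obuf. */
		crypto_use_output_buf(crp, out, len);

		crp->crp_payload_start = 0;
		crp->crp_payload_length = len;
		crp->crp_payload_output_start = 0;
	}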
struct cryptkop {
	TAILQ_ENTRY(cryptkop) krp_next;

	u_int		krp_op;		/* ie. CRK_MOD_EXP or other */
	u_int		krp_status;	/* return status */
	u_short		krp_iparams;	/* # of input parameters */
	u_short		krp_oparams;	/* # of output parameters */
	u_int		krp_crid;	/* desired device, etc. */
	uint32_t	krp_hid;	/* device used */
	struct crparam	krp_param[CRK_MAXPARAM]; /* kvm */
	void		(*krp_callback)(struct cryptkop *);
	struct cryptocap *krp_cap;
};

uint32_t crypto_ses2hid(crypto_session_t crypto_session);
uint32_t crypto_ses2caps(crypto_session_t crypto_session);
void *crypto_get_driver_session(crypto_session_t crypto_session);
const struct crypto_session_params *crypto_get_params(
    crypto_session_t crypto_session);
struct auth_hash *crypto_auth_hash(const struct crypto_session_params *csp);
struct enc_xform *crypto_cipher(const struct crypto_session_params *csp);

MALLOC_DECLARE(M_CRYPTO_DATA);

extern	int crypto_newsession(crypto_session_t *cses,
    const struct crypto_session_params *params, int hard);
extern	void crypto_freesession(crypto_session_t cses);
#define CRYPTOCAP_F_HARDWARE	CRYPTO_FLAG_HARDWARE
#define CRYPTOCAP_F_SOFTWARE	CRYPTO_FLAG_SOFTWARE
#define CRYPTOCAP_F_SYNC	0x04000000	/* operates synchronously */
#define CRYPTOCAP_F_ACCEL_SOFTWARE 0x08000000
+#define CRYPTO_SESS_SYNC(sess)	\
+	((crypto_ses2caps(sess) & CRYPTOCAP_F_SYNC) != 0)
extern	int32_t crypto_get_driverid(device_t dev, size_t session_size,
    int flags);
extern	int crypto_find_driver(const char *);
extern	device_t crypto_find_device_byhid(int hid);
extern	int crypto_getcaps(int hid);
extern	int crypto_kregister(uint32_t, int, uint32_t);
extern	int crypto_unregister_all(uint32_t driverid);
extern	int crypto_dispatch(struct cryptop *crp);
+#define CRYPTO_ASYNC_ORDERED	0x1	/* complete in order dispatched */
+extern	int crypto_dispatch_async(struct cryptop *crp, int flags);
+extern	void crypto_dispatch_batch(struct cryptopq *crpq, int flags);
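A sketch of the new batch interface: requests are chained on a cryptopq (the
TAILQ head declared earlier, using the existing crp_next linkage) and handed
to crypto_dispatch_batch in one call, so the framework can hint to the driver
that more work is imminent.  The helper below is hypothetical, request
initialization is elided, and the flags value of 0 is just the simplest case:

	static void
	dispatch_three(struct cryptop *crps[3])
	{
		struct cryptopq crpq;
		int i;

		TAILQ_INIT(&crpq);
		for (i = 0; i < 3; i++)
			TAILQ_INSERT_TAIL(&crpq, crps[i], crp_next);

		/*
		 * The framework consumes the queue; completion is still
		 * reported per request through crp_callback.
		 */
		crypto_dispatch_batch(&crpq, 0);
	}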
extern	int crypto_kdispatch(struct cryptkop *);
#define CRYPTO_SYMQ	0x1
#define CRYPTO_ASYMQ	0x2
extern	int crypto_unblock(uint32_t, int);
extern	void crypto_done(struct cryptop *crp);
extern	void crypto_kdone(struct cryptkop *);
extern	int crypto_getfeat(int *);

extern	void crypto_destroyreq(struct cryptop *crp);
extern	void crypto_initreq(struct cryptop *crp, crypto_session_t cses);
extern	void crypto_freereq(struct cryptop *crp);
extern	struct cryptop *crypto_getreq(crypto_session_t cses, int how);

extern	int crypto_usercrypto;		/* userland may do crypto requests */
extern	int crypto_userasymcrypto;	/* userland may do asym crypto reqs */
extern	int crypto_devallowsoft;	/* only use hardware crypto */

#ifdef SYSCTL_DECL
SYSCTL_DECL(_kern_crypto);
#endif

/* Helper routines for drivers to initialize auth contexts for HMAC. */
struct auth_hash;
void	hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx);
void	hmac_init_opad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx);

/*
 * Crypto-related utility routines used mainly by drivers.
 *
 * Similar to m_copyback/data, *_copyback copy data from the 'src'
 * buffer into the crypto request's data buffer while *_copydata copy
 * data from the crypto request's data buffer into the 'dst'
 * buffer.
 */
void	crypto_copyback(struct cryptop *crp, int off, int size,
	    const void *src);
void	crypto_copydata(struct cryptop *crp, int off, int size, void *dst);
int	crypto_apply(struct cryptop *crp, int off, int len,
	    int (*f)(void *, const void *, u_int), void *arg);
void	*crypto_contiguous_subsegment(struct cryptop *crp, size_t skip,
	    size_t len);

int	crypto_apply_buf(struct crypto_buffer *cb, int off, int len,
	    int (*f)(void *, const void *, u_int), void *arg);
void	*crypto_buffer_contiguous_subsegment(struct crypto_buffer *cb,
	    size_t skip, size_t len);
size_t	crypto_buffer_len(struct crypto_buffer *cb);
void	crypto_cursor_init(struct crypto_buffer_cursor *cc,
	    const struct crypto_buffer *cb);
void	crypto_cursor_advance(struct crypto_buffer_cursor *cc, size_t amount);
void	*crypto_cursor_segbase(struct crypto_buffer_cursor *cc);
size_t	crypto_cursor_seglen(struct crypto_buffer_cursor *cc);
void	crypto_cursor_copyback(struct crypto_buffer_cursor *cc, int size,
	    const void *vsrc);
void	crypto_cursor_copydata(struct crypto_buffer_cursor *cc, int size,
	    void *vdst);
void	crypto_cursor_copydata_noadv(struct crypto_buffer_cursor *cc, int size,
	    void *vdst);

static __inline void
crypto_read_iv(struct cryptop *crp, void *iv)
{
	const struct crypto_session_params *csp;

	csp = crypto_get_params(crp->crp_session);
	if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
		memcpy(iv, crp->crp_iv, csp->csp_ivlen);
	else
		crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv);
}

#endif /* _KERNEL */
#endif /* _CRYPTO_CRYPTO_H_ */
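For driver authors, the cursor routines declared above walk a request's data
buffer segment by segment regardless of its backing type (contiguous buffer,
mbuf chain, uio, or vm_page_t array).  A hedged sketch of copying a digest
out of a request at the offset recorded in the request (the helper name is
invented; the calls match the declarations above):

	static void
	copy_digest_out(struct cryptop *crp, void *digest, int len)
	{
		struct crypto_buffer_cursor cc;

		/* Start at the beginning of the (input) data buffer. */
		crypto_cursor_init(&cc, &crp->crp_buf);
		/* Skip forward to where the MAC/tag lives. */
		crypto_cursor_advance(&cc, crp->crp_digest_start);
		/* Copy it out, crossing segment boundaries as needed. */
		crypto_cursor_copydata(&cc, len, digest);
	}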
diff --git a/sys/sys/param.h b/sys/sys/param.h
index d6f1eb21dcd2..058aef99e077 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -1,374 +1,374 @@
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)param.h 8.3 (Berkeley) 4/4/95
 * $FreeBSD$
 */

#ifndef _SYS_PARAM_H_
#define _SYS_PARAM_H_

#include <sys/_null.h>

#define	BSD	199506		/* System version (year & month). */
#define BSD4_3	1
#define BSD4_4	1

/*
 * __FreeBSD_version numbers are documented in the Porter's Handbook.
 * If you bump the version for any reason, you should update the documentation
 * there.
 * Currently this lives here in the doc/ repository:
 *
 *	documentation/content/en/books/porters-handbook/versions/chapter.adoc
 *
 * scheme is:  <major><two digit minor>Rxx
 *		'R' is in the range 0 to 4 if this is a release branch or
 *		X.0-CURRENT before releng/X.0 is created, otherwise 'R' is
 *		in the range 5 to 9.
 */
#undef __FreeBSD_version
-#define __FreeBSD_version 1400003	/* Master, propagated to newvers */
+#define __FreeBSD_version 1400004	/* Master, propagated to newvers */

/*
 * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
 * which by definition is always true on FreeBSD.  This macro is also defined
 * on other systems that use the kernel of FreeBSD, such as GNU/kFreeBSD.
 *
 * It is tempting to use this macro in userland code when we want to enable
 * kernel-specific routines, and in fact it's fine to do this in code that
 * is part of FreeBSD itself.  However, be aware that as presence of this
 * macro is still not widespread (e.g. older FreeBSD versions, 3rd party
 * compilers, etc), it is STRONGLY DISCOURAGED to check for this macro in
 * external applications without also checking for __FreeBSD__ as an
 * alternative.
 */
#undef __FreeBSD_kernel__
#define __FreeBSD_kernel__

#if defined(_KERNEL) || defined(IN_RTLD)
#define	P_OSREL_SIGWAIT			700000
#define	P_OSREL_SIGSEGV			700004
#define	P_OSREL_MAP_ANON		800104
#define	P_OSREL_MAP_FSTRICT		1100036
#define	P_OSREL_SHUTDOWN_ENOTCONN	1100077
#define	P_OSREL_MAP_GUARD		1200035
#define	P_OSREL_WRFSBASE		1200041
#define	P_OSREL_CK_CYLGRP		1200046
#define	P_OSREL_VMTOTAL64		1200054
#define	P_OSREL_CK_SUPERBLOCK		1300000
#define	P_OSREL_CK_INODE		1300005
#define	P_OSREL_POWERPC_NEW_AUX_ARGS	1300070

#define	P_OSREL_MAJOR(x)		((x) / 100000)
#endif

#ifndef LOCORE
#include <sys/types.h>
#endif

/*
 * Machine-independent constants (some used in following include files).
 * Redefined constants are from POSIX 1003.1 limits file.
 *
 * MAXCOMLEN should be >= sizeof(ac_comm) (see <acct.h>)
 */
#include <sys/syslimits.h>

#define	MAXCOMLEN	19		/* max command name remembered */
#define	MAXINTERP	PATH_MAX	/* max interpreter file name length */
#define	MAXLOGNAME	33		/* max login name length (incl. NUL) */
#define	MAXUPRC		CHILD_MAX	/* max simultaneous processes */
#define	NCARGS		ARG_MAX		/* max bytes for an exec function */
#define	NGROUPS		(NGROUPS_MAX+1)	/* max number groups */
#define	NOFILE		OPEN_MAX	/* max open files per process */
#define	NOGROUP		65535		/* marker for empty group set member */
#define MAXHOSTNAMELEN	256		/* max hostname size */
#define SPECNAMELEN	255		/* max length of devicename */
/* More types and definitions used throughout the kernel. */
#ifdef _KERNEL
#include <sys/cdefs.h>
#include <sys/errno.h>
#ifndef LOCORE
#include <sys/time.h>
#include <sys/priority.h>
#endif

#ifndef FALSE
#define	FALSE	0
#endif
#ifndef TRUE
#define	TRUE	1
#endif
#endif

#ifndef _KERNEL
#ifndef LOCORE
/* Signals. */
#include <sys/signal.h>
#endif
#endif

/* Machine type dependent parameters. */
#include <machine/param.h>
#ifndef _KERNEL
#include <sys/limits.h>
#endif

#ifndef DEV_BSHIFT
#define	DEV_BSHIFT	9		/* log2(DEV_BSIZE) */
#endif
#define	DEV_BSIZE	(1<<DEV_BSHIFT)

#ifndef BLKDEV_IOSIZE
#define BLKDEV_IOSIZE	PAGE_SIZE	/* default block device I/O size */
#endif
#ifndef DFLTPHYS
#define DFLTPHYS	(64 * 1024)	/* default max raw I/O transfer size */
#endif
#ifndef MAXPHYS				/* max raw I/O transfer size */
#ifdef __ILP32__
#define MAXPHYS		(128 * 1024)
#else
#define MAXPHYS		(1024 * 1024)
#endif
#endif
#ifndef MAXDUMPPGS
#define MAXDUMPPGS	(DFLTPHYS/PAGE_SIZE)
#endif

/*
 * Constants related to network buffer management.
 * MCLBYTES must be no larger than PAGE_SIZE.
 */
#ifndef	MSIZE
#define	MSIZE		256		/* size of an mbuf */
#endif

#ifndef	MCLSHIFT
#define MCLSHIFT	11		/* convert bytes to mbuf clusters */
#endif	/* MCLSHIFT */

#define MCLBYTES	(1 << MCLSHIFT)	/* size of an mbuf cluster */

#define	MJUMPAGESIZE	PAGE_SIZE	/* jumbo cluster one page */
#define	MJUM9BYTES	(9 * 1024)	/* jumbo cluster 9k */
#define	MJUM16BYTES	(16 * 1024)	/* jumbo cluster 16k */

/*
 * Some macros for units conversion
 */

/* clicks to bytes */
#ifndef ctob
#define ctob(x)	((x)<<PAGE_SHIFT)
#endif

/* bytes to clicks */
#ifndef btoc
#define btoc(x)	(((vm_offset_t)(x)+PAGE_MASK)>>PAGE_SHIFT)
#endif

/*
 * btodb() is messy and perhaps slow because `bytes' may be an off_t.  We
 * want to shift an unsigned type to avoid sign extension and we don't
 * want to widen `bytes' unnecessarily.  Assume that the result fits in
 * a daddr_t.
 */
#ifndef btodb
#define btodb(bytes)			/* calculates (bytes / DEV_BSIZE) */ \
	(sizeof (bytes) > sizeof(long) \
	 ? (daddr_t)((unsigned long long)(bytes) >> DEV_BSHIFT) \
	 : (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT))
#endif

#ifndef dbtob
#define dbtob(db)			/* calculates (db * DEV_BSIZE) */ \
	((off_t)(db) << DEV_BSHIFT)
#endif

#define	PRIMASK	0x0ff
#define	PCATCH	0x100		/* OR'd with pri for tsleep to check signals */
#define	PDROP	0x200	/* OR'd with pri to stop re-entry of interlock mutex */

#define	NZERO	0		/* default "nice" */

#define	NBBY	8		/* number of bits in a byte */
#define	NBPW	sizeof(int)	/* number of bytes per word (integer) */

#define	CMASK	022		/* default file mask: S_IWGRP|S_IWOTH */

#define	NODEV	(dev_t)(-1)	/* non-existent device */

/*
 * File system parameters and macros.
 *
 * MAXBSIZE -	Filesystems are made out of blocks of at most MAXBSIZE bytes
 *		per block.  MAXBSIZE may be made larger without effecting
 *		any existing filesystems as long as it does not exceed MAXPHYS,
 *		and may be made smaller at the risk of not being able to use
 *		filesystems which require a block size exceeding MAXBSIZE.
 *
 * MAXBCACHEBUF - Maximum size of a buffer in the buffer cache.  This must
 *		be >= MAXBSIZE and can be set differently for different
 *		architectures by defining it in <machine/param.h>.
 *		Making this larger allows NFS to do larger reads/writes.
 *
 * BKVASIZE -	Nominal buffer space per buffer, in bytes.  BKVASIZE is the
 *		minimum KVM memory reservation the kernel is willing to make.
 *		Filesystems can of course request smaller chunks.  Actual
 *		backing memory uses a chunk size of a page (PAGE_SIZE).
 *		The default value here can be overridden on a per-architecture
 *		basis by defining it in <machine/param.h>.
 *
 *		If you make BKVASIZE too small you risk seriously fragmenting
 *		the buffer KVM map which may slow things down a bit.  If you
 *		make it too big the kernel will not be able to optimally use
 *		the KVM memory reserved for the buffer cache and will wind
 *		up with too-few buffers.
 *
 *		The default is 16384, roughly 2x the block size used by a
 *		normal UFS filesystem.
 */
#define MAXBSIZE	65536		/* must be power of 2 */
#ifndef	MAXBCACHEBUF
#define	MAXBCACHEBUF	MAXBSIZE	/* must be a power of 2 >= MAXBSIZE */
#endif
#ifndef	BKVASIZE
#define BKVASIZE	16384		/* must be power of 2 */
#endif
#define BKVAMASK	(BKVASIZE-1)

/*
 * MAXPATHLEN defines the longest permissible path length after expanding
 * symbolic links.  It is used to allocate a temporary buffer from the buffer
 * pool in which to do the name expansion, hence should be a power of two,
 * and must be less than or equal to MAXBSIZE.  MAXSYMLINKS defines the
 * maximum number of symbolic links that may be expanded in a path name.
 * It should be set high enough to allow all legitimate uses, but halt
 * infinite loops reasonably quickly.
 */
#define	MAXPATHLEN	PATH_MAX
#define MAXSYMLINKS	32
/* Bit map related macros. */
#define	setbit(a,i)	(((unsigned char *)(a))[(i)/NBBY] |= 1<<((i)%NBBY))
#define	clrbit(a,i)	(((unsigned char *)(a))[(i)/NBBY] &= ~(1<<((i)%NBBY)))
#define	isset(a,i)							\
	(((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY)))
#define	isclr(a,i)							\
	((((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) == 0)

/* Macros for counting and rounding. */
#ifndef howmany
#define	howmany(x, y)	(((x)+((y)-1))/(y))
#endif
#define	nitems(x)	(sizeof((x)) / sizeof((x)[0]))
#define	rounddown(x, y)	(((x)/(y))*(y))
#define	rounddown2(x, y) __align_down(x, y) /* if y is power of two */
#define	roundup(x, y)	((((x)+((y)-1))/(y))*(y))  /* to any y */
#define	roundup2(x, y)	__align_up(x, y) /* if y is powers of two */
#define powerof2(x)	((((x)-1)&(x))==0)

/* Macros for min/max. */
#define	MIN(a,b)	(((a)<(b))?(a):(b))
#define	MAX(a,b)	(((a)>(b))?(a):(b))

#ifdef _KERNEL
/*
 * Basic byte order function prototypes for non-inline functions.
 */
#ifndef LOCORE
#ifndef _BYTEORDER_PROTOTYPED
#define	_BYTEORDER_PROTOTYPED
__BEGIN_DECLS
__uint32_t	 htonl(__uint32_t);
__uint16_t	 htons(__uint16_t);
__uint32_t	 ntohl(__uint32_t);
__uint16_t	 ntohs(__uint16_t);
__END_DECLS
#endif
#endif

#ifndef _BYTEORDER_FUNC_DEFINED
#define	_BYTEORDER_FUNC_DEFINED
#define	htonl(x)	__htonl(x)
#define	htons(x)	__htons(x)
#define	ntohl(x)	__ntohl(x)
#define	ntohs(x)	__ntohs(x)
#endif /* !_BYTEORDER_FUNC_DEFINED */
#endif /* _KERNEL */

/*
 * Scale factor for scaled integers used to count %cpu time and load avgs.
 *
 * The number of CPU `tick's that map to a unique `%age' can be expressed
 * by the formula (1 / (2 ^ (FSHIFT - 11))).  The maximum load average that
 * can be calculated (assuming 32 bits) can be closely approximated using
 * the formula (2 ^ (2 * (16 - FSHIFT))) for (FSHIFT < 15).
 *
 * For the scheduler to maintain a 1:1 mapping of CPU `tick' to `%age',
 * FSHIFT must be at least 11; this gives us a maximum load avg of ~1024.
 */
#define	FSHIFT	11		/* bits to right of fixed binary point */
#define FSCALE	(1<<FSHIFT)

#define dbtoc(db)			/* calculates devblks to pages */ \
	((db + (ctodb(1) - 1)) >> (PAGE_SHIFT - DEV_BSHIFT))

#define ctodb(db)			/* calculates pages to devblks */ \
	((db) << (PAGE_SHIFT - DEV_BSHIFT))

/*
 * Old spelling of __containerof().
 */
#define	member2struct(s, m, x)						\
	((struct s *)(void *)((char *)(x) - offsetof(struct s, m)))

/*
 * Access a variable length array that has been declared as a fixed
 * length array.
 */
#define __PAST_END(array, offset) (((__typeof__(*(array)) *)(array))[offset])

#endif	/* _SYS_PARAM_H_ */
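As a closing illustration of the counting and rounding macros above, the
values below can be checked at compile time; this is a sketch (the chosen
numbers are arbitrary, and it assumes a translation unit that includes
sys/param.h with C11 static assertions available):

	#include <sys/param.h>

	/* roundup2/rounddown2 require a power-of-two y; roundup takes any y. */
	_Static_assert(roundup2(100, 16) == 112, "round up to multiple of 16");
	_Static_assert(rounddown2(100, 16) == 96, "round down to multiple of 16");
	_Static_assert(howmany(100, 16) == 7, "ceiling division");
	_Static_assert(powerof2(64) && !powerof2(100), "power-of-two test");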