diff --git a/module/icp/core/kcf_mech_tabs.c b/module/icp/core/kcf_mech_tabs.c
index 41705e84bc4b..b6e693769804 100644
--- a/module/icp/core/kcf_mech_tabs.c
+++ b/module/icp/core/kcf_mech_tabs.c
@@ -1,435 +1,434 @@
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include
#include
#include
#include

/* Cryptographic mechanisms tables and their access functions */

/*
 * Internal numbers assigned to mechanisms are coded as follows:
 *
 * +----------------+----------------+
 * | mech. class    | mech. index    |
 * <--- 32-bits --->+<--- 32-bits --->
 *
 * the mech_class identifies the table the mechanism belongs to.
 * mech_index is the index for that mechanism in the table.
 * A mechanism belongs to exactly 1 table.
 * The tables are:
 * . digest_mechs_tab[] for the msg digest mechs.
 * . cipher_mechs_tab[] for encrypt/decrypt and wrap/unwrap mechs.
 * . mac_mechs_tab[] for MAC mechs.
 * . sign_mechs_tab[] for sign & verify mechs.
 * . keyops_mechs_tab[] for key/key pair generation, and key derivation.
 * . misc_mechs_tab[] for mechs that don't belong to any of the above.
 *
 * There are no holes in the tables.
 */

/*
 * Locking conventions:
 * --------------------
 * A mutex is associated with every entry of the tables.
 * The mutex is acquired whenever the entry is accessed for
 * 1) retrieving the mech_id (comparing the mech name)
 * 2) finding a provider for an xxx_init() or atomic operation.
 * 3) altering the mechs entry to add or remove a provider.
 *
 * In 2), after a provider is chosen, its prov_desc is held and the
 * entry's mutex must be dropped. The provider's working function (SPI) is
 * called outside the mech_entry's mutex.
 *
 * The number of providers for a particular mechanism is not expected to be
 * large enough to justify the cost of using rwlocks, so the per-mechanism
 * entry mutex won't be very *hot*.
 */

/* Mechanisms tables */

/* RFE 4687834 Will deal with the extensibility of these tables later */

static kcf_mech_entry_t kcf_digest_mechs_tab[KCF_MAXDIGEST];
static kcf_mech_entry_t kcf_cipher_mechs_tab[KCF_MAXCIPHER];
static kcf_mech_entry_t kcf_mac_mechs_tab[KCF_MAXMAC];

const kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = {
	{0, NULL},				/* No class zero */
	{KCF_MAXDIGEST, kcf_digest_mechs_tab},
	{KCF_MAXCIPHER, kcf_cipher_mechs_tab},
	{KCF_MAXMAC, kcf_mac_mechs_tab},
};

static avl_tree_t kcf_mech_hash;

static int
kcf_mech_hash_compar(const void *lhs, const void *rhs)
{
	const kcf_mech_entry_t *l = lhs, *r = rhs;
	int cmp = strncmp(l->me_name, r->me_name, CRYPTO_MAX_MECH_NAME);
	return ((0 < cmp) - (cmp < 0));
}

void
kcf_destroy_mech_tabs(void)
{
	for (void *cookie = NULL; avl_destroy_nodes(&kcf_mech_hash, &cookie); )
		;
	avl_destroy(&kcf_mech_hash);
}

/*
 * kcf_init_mech_tabs()
 *
 * Called by the misc/kcf's _init() routine to initialize the tables
 * of mech_entry's.
 */
void
kcf_init_mech_tabs(void)
{
	avl_create(&kcf_mech_hash, kcf_mech_hash_compar,
	    sizeof (kcf_mech_entry_t), offsetof(kcf_mech_entry_t, me_node));
}

/*
 * kcf_create_mech_entry()
 *
 * Arguments:
 *	. The class of mechanism.
 *	. the name of the new mechanism.
 *
 * Description:
 *	Creates a new mech_entry for a mechanism not yet known to the
 *	framework.
 *	This routine is called by kcf_add_mech_provider, which is
 *	in turn invoked for each mechanism supported by a provider.
 *	The 'class' argument depends on the crypto_func_group_t bitmask
 *	in the registering provider's mech_info struct for this mechanism.
 *	When there is ambiguity in the mapping between the
 *	crypto_func_group_t and a class (dual ops, ...) the KCF_MISC_CLASS
 *	should be used.
 *
 * Context:
 *	User context only.
 *
 * Returns:
 *	KCF_INVALID_MECH_CLASS or KCF_INVALID_MECH_NAME if the class or
 *	the mechname is bogus.
 *	KCF_MECH_TAB_FULL when there is no room left in the mech. tabs.
 *	KCF_SUCCESS otherwise.
 */
static int
kcf_create_mech_entry(kcf_ops_class_t class, const char *mechname)
{
	if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS))
		return (KCF_INVALID_MECH_CLASS);

	if ((mechname == NULL) || (mechname[0] == 0))
		return (KCF_INVALID_MECH_NAME);

	/*
	 * First check if the mechanism is already in one of the tables.
	 * The mech_entry could be in another class.
	 */
	avl_index_t where = 0;
	kcf_mech_entry_t tmptab;
	strlcpy(tmptab.me_name, mechname, CRYPTO_MAX_MECH_NAME);
	if (avl_find(&kcf_mech_hash, &tmptab, &where) != NULL)
		return (KCF_SUCCESS);

	/* Now take the next unused mech entry in the class's tab */
	kcf_mech_entry_t *me_tab = kcf_mech_tabs_tab[class].met_tab;
	int size = kcf_mech_tabs_tab[class].met_size;

	for (int i = 0; i < size; ++i)
		if (me_tab[i].me_name[0] == 0) {
			/* Found an empty spot */
			strlcpy(me_tab[i].me_name, mechname,
			    CRYPTO_MAX_MECH_NAME);
			me_tab[i].me_mechid = KCF_MECHID(class, i);

			/* Add the new mechanism to the hash table */
			avl_insert(&kcf_mech_hash, &me_tab[i], where);
			return (KCF_SUCCESS);
		}

	return (KCF_MECH_TAB_FULL);
}
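/*
 * Illustrative sketch, not part of the patch: KCF_MECHID(),
 * KCF_MECH2CLASS() and KCF_MECH2INDEX() are provided by the ICP headers;
 * going by the 32/32 encoding described at the top of this file, they
 * behave like the demo_* helpers below (hypothetical names; the real
 * macro definitions may differ in detail).
 */
#include <stdint.h>

typedef uint64_t demo_mech_type_t;

static inline demo_mech_type_t
demo_mechid(uint32_t class, uint32_t index)
{
	/* mechanism class in the upper 32 bits, table index in the lower */
	return (((demo_mech_type_t)class << 32) | index);
}

static inline uint32_t
demo_mech2class(demo_mech_type_t id)
{
	return ((uint32_t)(id >> 32));
}

static inline uint32_t
demo_mech2index(demo_mech_type_t id)
{
	return ((uint32_t)(id & 0xFFFFFFFFu));
}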
/*
 * kcf_add_mech_provider()
 *
 * Arguments:
 *	. An index into the provider mechanism array
 *	. A pointer to the provider descriptor
 *	. A storage for the kcf_prov_mech_desc_t the entry was added at.
 *
 * Description:
 *	Adds a new provider of a mechanism to the mechanism's mech_entry
 *	chain.
 *
 * Context:
 *	User context only.
 *
 * Returns:
 *	KCF_SUCCESS on success
 *	KCF_MECH_TAB_FULL otherwise.
 */
int
kcf_add_mech_provider(short mech_indx,
    kcf_provider_desc_t *prov_desc, kcf_prov_mech_desc_t **pmdpp)
{
	int error;
	kcf_mech_entry_t *mech_entry = NULL;
	const crypto_mech_info_t *mech_info;
	crypto_mech_type_t kcf_mech_type;
	kcf_prov_mech_desc_t *prov_mech;

	mech_info = &prov_desc->pd_mechanisms[mech_indx];

	/*
	 * A mechanism belongs to exactly one mechanism table.
	 * Find the class corresponding to the function group flag of
	 * the mechanism.
	 */
	kcf_mech_type = crypto_mech2id(mech_info->cm_mech_name);
	if (kcf_mech_type == CRYPTO_MECH_INVALID) {
		crypto_func_group_t fg = mech_info->cm_func_group_mask;
		kcf_ops_class_t class;

		if (fg & CRYPTO_FG_DIGEST || fg & CRYPTO_FG_DIGEST_ATOMIC)
			class = KCF_DIGEST_CLASS;
-		else if (fg & CRYPTO_FG_ENCRYPT || fg & CRYPTO_FG_DECRYPT ||
-		    fg & CRYPTO_FG_ENCRYPT_ATOMIC ||
+		else if (fg & CRYPTO_FG_ENCRYPT_ATOMIC ||
		    fg & CRYPTO_FG_DECRYPT_ATOMIC)
			class = KCF_CIPHER_CLASS;
		else if (fg & CRYPTO_FG_MAC || fg & CRYPTO_FG_MAC_ATOMIC)
			class = KCF_MAC_CLASS;
		else
			__builtin_unreachable();

		/*
		 * Attempt to create a new mech_entry for the specified
		 * mechanism. kcf_create_mech_entry() can handle the case
		 * where such an entry already exists.
		 */
		if ((error = kcf_create_mech_entry(class,
		    mech_info->cm_mech_name)) != KCF_SUCCESS) {
			return (error);
		}
		/* get the KCF mech type that was assigned to the mechanism */
		kcf_mech_type = crypto_mech2id(mech_info->cm_mech_name);
		ASSERT(kcf_mech_type != CRYPTO_MECH_INVALID);
	}

	error = kcf_get_mech_entry(kcf_mech_type, &mech_entry);
	ASSERT(error == KCF_SUCCESS);

	/* allocate and initialize new kcf_prov_mech_desc */
	prov_mech = kmem_zalloc(sizeof (kcf_prov_mech_desc_t), KM_SLEEP);
	memcpy(&prov_mech->pm_mech_info, mech_info,
	    sizeof (crypto_mech_info_t));
	prov_mech->pm_prov_desc = prov_desc;
	prov_desc->pd_mech_indx[KCF_MECH2CLASS(kcf_mech_type)]
	    [KCF_MECH2INDEX(kcf_mech_type)] = mech_indx;

	KCF_PROV_REFHOLD(prov_desc);
	KCF_PROV_IREFHOLD(prov_desc);

	/*
	 * Install the new kcf_prov_mech_desc as this mechanism's provider;
	 * only one provider per mechanism is supported.
	 */
	if (mech_entry->me_sw_prov != NULL) {
		/*
		 * There is already a provider for this mechanism.
		 * Since we allow only one provider per mechanism,
		 * report this condition.
		 */
		cmn_err(CE_WARN, "The cryptographic provider "
		    "\"%s\" will not be used for %s. The provider "
		    "\"%s\" will be used for this mechanism "
		    "instead.", prov_desc->pd_description,
		    mech_info->cm_mech_name,
		    mech_entry->me_sw_prov->pm_prov_desc->
		    pd_description);
		KCF_PROV_REFRELE(prov_desc);
		kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
		prov_mech = NULL;
	} else {
		/*
		 * Set the provider as the provider for
		 * this mechanism.
		 */
		mech_entry->me_sw_prov = prov_mech;
	}

	*pmdpp = prov_mech;

	return (KCF_SUCCESS);
}

/*
 * kcf_remove_mech_provider()
 *
 * Arguments:
 *	. mech_name: the name of the mechanism.
 *	. prov_desc: The provider descriptor
 *
 * Description:
 *	Removes a provider from the chain of provider descriptors.
 *	The provider is made unavailable to kernel consumers for the specified
 *	mechanism.
 *
 * Context:
 *	User context only.
 */
void
kcf_remove_mech_provider(const char *mech_name, kcf_provider_desc_t *prov_desc)
{
	crypto_mech_type_t mech_type;
	kcf_prov_mech_desc_t *prov_mech = NULL;
	kcf_mech_entry_t *mech_entry;

	/* get the KCF mech type that was assigned to the mechanism */
	if ((mech_type = crypto_mech2id(mech_name)) == CRYPTO_MECH_INVALID) {
		/*
		 * Provider was not allowed for this mech due to policy or
		 * configuration.
		 */
		return;
	}

	/* get a ptr to the mech_entry that was created */
	if (kcf_get_mech_entry(mech_type, &mech_entry) != KCF_SUCCESS) {
		/*
		 * Provider was not allowed for this mech due to policy or
		 * configuration.
		 */
		return;
	}

	if (mech_entry->me_sw_prov == NULL ||
	    mech_entry->me_sw_prov->pm_prov_desc != prov_desc) {
		/* not the provider for this mechanism */
		return;
	}

	prov_mech = mech_entry->me_sw_prov;
	mech_entry->me_sw_prov = NULL;

	/* free entry */
	KCF_PROV_IREFRELE(prov_mech->pm_prov_desc);
	KCF_PROV_REFRELE(prov_mech->pm_prov_desc);
	kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t));
}

/*
 * kcf_get_mech_entry()
 *
 * Arguments:
 *	. The framework mechanism type
 *	. Storage for the mechanism entry
 *
 * Description:
 *	Retrieves the mechanism entry for the mech.
 *
 * Context:
 *	User and interrupt contexts.
 *
 * Returns:
 *	KCF_MECHANISM_XXX appropriate error code.
 *	KCF_SUCCESS otherwise.
 */
int
kcf_get_mech_entry(crypto_mech_type_t mech_type, kcf_mech_entry_t **mep)
{
	kcf_ops_class_t class;
	int index;
	const kcf_mech_entry_tab_t *me_tab;

	ASSERT(mep != NULL);

	class = KCF_MECH2CLASS(mech_type);

	if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
		/* the caller won't need to know it's an invalid class */
		return (KCF_INVALID_MECH_NUMBER);
	}

	me_tab = &kcf_mech_tabs_tab[class];
	index = KCF_MECH2INDEX(mech_type);

	if ((index < 0) || (index >= me_tab->met_size)) {
		return (KCF_INVALID_MECH_NUMBER);
	}

	*mep = &((me_tab->met_tab)[index]);

	return (KCF_SUCCESS);
}

/*
 * crypto_mech2id()
 *
 * Arguments:
 *	. mechname: A null-terminated string identifying the mechanism name.
 *
 * Description:
 *	Walks the mechanisms tables, looking for an entry that matches the
 *	mechname. Once it finds it, it builds the 64-bit mech_type and
 *	returns it.
 *
 * Context:
 *	Process and interruption.
 *
 * Returns:
 *	The unique mechanism identified by 'mechname', if found.
 *	CRYPTO_MECH_INVALID otherwise.
 */
/*
 * Look up the hash table for an entry that matches the mechname.
 */
crypto_mech_type_t
crypto_mech2id(const char *mechname)
{
	kcf_mech_entry_t tmptab, *found;
	strlcpy(tmptab.me_name, mechname, CRYPTO_MAX_MECH_NAME);

	if ((found = avl_find(&kcf_mech_hash, &tmptab, NULL))) {
		ASSERT(found->me_mechid != CRYPTO_MECH_INVALID);
		return (found->me_mechid);
	}

	return (CRYPTO_MECH_INVALID);
}

#if defined(_KERNEL)
EXPORT_SYMBOL(crypto_mech2id);
#endif
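/*
 * Illustrative sketch, not part of the patch: resolving a mechanism name to
 * its table entry with the two functions above.  kcf_lookup_demo() is a
 * hypothetical helper.
 */
static int
kcf_lookup_demo(const char *name)
{
	kcf_mech_entry_t *me;
	crypto_mech_type_t id = crypto_mech2id(name);

	if (id == CRYPTO_MECH_INVALID)
		return (KCF_INVALID_MECH_NAME);

	/* kcf_get_mech_entry() decodes the class and index from the id */
	return (kcf_get_mech_entry(id, &me));
}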
diff --git a/module/icp/include/sys/crypto/spi.h b/module/icp/include/sys/crypto/spi.h
index 63dfce7957a8..9bcb62ac5290 100644
--- a/module/icp/include/sys/crypto/spi.h
+++ b/module/icp/include/sys/crypto/spi.h
@@ -1,238 +1,217 @@
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_CRYPTO_SPI_H
#define	_SYS_CRYPTO_SPI_H

/*
 * CSPI: Cryptographic Service Provider Interface.
 */

#include
#include

#ifdef	__cplusplus
extern "C" {
#endif

#ifdef CONSTIFY_PLUGIN
#define	__no_const __attribute__((no_const))
#else
#define	__no_const
#endif /* CONSTIFY_PLUGIN */

/*
 * Context templates can be used by providers to pre-process
 * keying material, such as key schedules. They are allocated by
 * a provider create_ctx_template(9E) entry point, and passed
 * as argument to initialization and atomic provider entry points.
 */
typedef void *crypto_spi_ctx_template_t;

/*
 * The context structure is passed from the kernel to a provider.
 * It contains the information needed to process a multi-part or
 * single part operation. The context structure is not used
 * by atomic operations.
 *
 * Parameters needed to perform a cryptographic operation, such
 * as keys, mechanisms, input and output buffers, are passed
 * as separate arguments to Provider routines.
 */
typedef struct crypto_ctx {
	void *cc_provider_private;	/* owned by provider */
	void *cc_framework_private;	/* owned by framework */
} crypto_ctx_t;

/*
 * The crypto_digest_ops structure contains pointers to digest
 * operations for cryptographic providers. It is passed through
 * the crypto_ops(9S) structure when providers register with the
 * kernel using crypto_register_provider(9F).
 */
typedef struct crypto_digest_ops {
	int (*digest_init)(crypto_ctx_t *, crypto_mechanism_t *);
	int (*digest)(crypto_ctx_t *, crypto_data_t *, crypto_data_t *);
	int (*digest_update)(crypto_ctx_t *, crypto_data_t *);
	int (*digest_key)(crypto_ctx_t *, crypto_key_t *);
	int (*digest_final)(crypto_ctx_t *, crypto_data_t *);
	int (*digest_atomic)(crypto_mechanism_t *, crypto_data_t *,
	    crypto_data_t *);
} __no_const crypto_digest_ops_t;

/*
 * The crypto_cipher_ops structure contains pointers to encryption
 * and decryption operations for cryptographic providers. It is
 * passed through the crypto_ops(9S) structure when providers register
 * with the kernel using crypto_register_provider(9F).
 */
typedef struct crypto_cipher_ops {
-	int (*encrypt_init)(crypto_ctx_t *,
-	    crypto_mechanism_t *, crypto_key_t *,
-	    crypto_spi_ctx_template_t);
-	int (*encrypt)(crypto_ctx_t *,
-	    crypto_data_t *, crypto_data_t *);
-	int (*encrypt_update)(crypto_ctx_t *,
-	    crypto_data_t *, crypto_data_t *);
-	int (*encrypt_final)(crypto_ctx_t *,
-	    crypto_data_t *);
	int (*encrypt_atomic)(crypto_mechanism_t *, crypto_key_t *,
	    crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t);
-
-	int (*decrypt_init)(crypto_ctx_t *,
-	    crypto_mechanism_t *, crypto_key_t *,
-	    crypto_spi_ctx_template_t);
-	int (*decrypt)(crypto_ctx_t *,
-	    crypto_data_t *, crypto_data_t *);
-	int (*decrypt_update)(crypto_ctx_t *,
-	    crypto_data_t *, crypto_data_t *);
-	int (*decrypt_final)(crypto_ctx_t *,
-	    crypto_data_t *);
	int (*decrypt_atomic)(crypto_mechanism_t *, crypto_key_t *,
	    crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t);
} __no_const crypto_cipher_ops_t;
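/*
 * Illustrative sketch, not part of the patch: with the multi-part entry
 * points removed, the framework reaches a cipher provider only through the
 * two atomic hooks.  demo_encrypt_via_ops() is a hypothetical caller.
 */
static int
demo_encrypt_via_ops(const crypto_cipher_ops_t *ops, crypto_mechanism_t *mech,
    crypto_key_t *key, crypto_data_t *pt, crypto_data_t *ct,
    crypto_spi_ctx_template_t tmpl)
{
	/* one call covers what used to be init + update + final */
	return (ops->encrypt_atomic(mech, key, pt, ct, tmpl));
}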
/*
 * The crypto_mac_ops structure contains pointers to MAC
 * operations for cryptographic providers. It is passed through
 * the crypto_ops(9S) structure when providers register with the
 * kernel using crypto_register_provider(9F).
 */
typedef struct crypto_mac_ops {
	int (*mac_init)(crypto_ctx_t *,
	    crypto_mechanism_t *, crypto_key_t *,
	    crypto_spi_ctx_template_t);
	int (*mac)(crypto_ctx_t *,
	    crypto_data_t *, crypto_data_t *);
	int (*mac_update)(crypto_ctx_t *,
	    crypto_data_t *);
	int (*mac_final)(crypto_ctx_t *,
	    crypto_data_t *);
	int (*mac_atomic)(crypto_mechanism_t *,
	    crypto_key_t *, crypto_data_t *, crypto_data_t *,
	    crypto_spi_ctx_template_t);
	int (*mac_verify_atomic)(crypto_mechanism_t *,
	    crypto_key_t *, crypto_data_t *, crypto_data_t *,
	    crypto_spi_ctx_template_t);
} __no_const crypto_mac_ops_t;

/*
 * The crypto_ctx_ops structure contains pointers to context and context
 * template management operations for cryptographic providers. It is
 * passed through the crypto_ops(9S) structure when providers register
 * with the kernel using crypto_register_provider(9F).
 */
typedef struct crypto_ctx_ops {
	int (*create_ctx_template)(crypto_mechanism_t *, crypto_key_t *,
	    crypto_spi_ctx_template_t *, size_t *);
	int (*free_context)(crypto_ctx_t *);
} __no_const crypto_ctx_ops_t;

/*
 * The crypto_ops(9S) structure contains the structures containing
 * the pointers to functions implemented by cryptographic providers.
 * It is specified as part of the crypto_provider_info(9S)
 * supplied by a provider when it registers with the kernel
 * by calling crypto_register_provider(9F).
 */
typedef struct crypto_ops {
	const crypto_digest_ops_t	*co_digest_ops;
	const crypto_cipher_ops_t	*co_cipher_ops;
	const crypto_mac_ops_t		*co_mac_ops;
	const crypto_ctx_ops_t		*co_ctx_ops;
} crypto_ops_t;

/*
 * The mechanism info structure crypto_mech_info_t contains a function group
 * bit mask cm_func_group_mask. This field, of type crypto_func_group_t,
 * specifies the provider entry point that can be used with a particular
 * mechanism. The function group mask is a combination of the following
 * values.
 */
typedef uint32_t crypto_func_group_t;

-#define	CRYPTO_FG_ENCRYPT		0x00000001 /* encrypt_init() */
-#define	CRYPTO_FG_DECRYPT		0x00000002 /* decrypt_init() */
#define	CRYPTO_FG_DIGEST		0x00000004 /* digest_init() */
#define	CRYPTO_FG_MAC			0x00001000 /* mac_init() */
#define	CRYPTO_FG_ENCRYPT_ATOMIC	0x00008000 /* encrypt_atomic() */
#define	CRYPTO_FG_DECRYPT_ATOMIC	0x00010000 /* decrypt_atomic() */
#define	CRYPTO_FG_MAC_ATOMIC		0x00020000 /* mac_atomic() */
#define	CRYPTO_FG_DIGEST_ATOMIC		0x00040000 /* digest_atomic() */

/*
 * Maximum length of the pi_provider_description field of the
 * crypto_provider_info structure.
 */
#define	CRYPTO_PROVIDER_DESCR_MAX_LEN	64

/*
 * The crypto_mech_info structure specifies one of the mechanisms
 * supported by a cryptographic provider. The pi_mechanisms field of
 * the crypto_provider_info structure contains a pointer to an array
 * of crypto_mech_info's.
 */
typedef struct crypto_mech_info {
	crypto_mech_name_t	cm_mech_name;
	crypto_mech_type_t	cm_mech_number;
	crypto_func_group_t	cm_func_group_mask;
} crypto_mech_info_t;

/*
 * crypto_kcf_provider_handle_t is a handle allocated by the kernel.
 * It is returned after the provider registers with
 * crypto_register_provider(), and must be specified by the provider
 * when calling crypto_unregister_provider(), and
 * crypto_provider_notification().
 */
typedef uint_t crypto_kcf_provider_handle_t;
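/*
 * Illustrative sketch, not part of the patch: a cipher mechanism's function
 * group mask can now only carry the atomic bits.  demo_is_atomic_cipher()
 * is a hypothetical predicate.
 */
static boolean_t
demo_is_atomic_cipher(crypto_func_group_t fg)
{
	return ((fg & (CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC))
	    != 0);
}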
/*
 * Provider information. Passed as argument to crypto_register_provider(9F).
 * Describes the provider and its capabilities.
 */
typedef struct crypto_provider_info {
	const char				*pi_provider_description;
	const crypto_ops_t			*pi_ops_vector;
	uint_t					pi_mech_list_count;
	const crypto_mech_info_t		*pi_mechanisms;
} crypto_provider_info_t;

/*
 * Functions exported by Solaris to cryptographic providers. Providers
 * call these functions to register and unregister, notify the kernel
 * of state changes, and notify the kernel when an asynchronous request
 * completes.
 */
extern int crypto_register_provider(const crypto_provider_info_t *,
    crypto_kcf_provider_handle_t *);
extern int crypto_unregister_provider(crypto_kcf_provider_handle_t);

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_CRYPTO_SPI_H */
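/*
 * Illustrative sketch, not part of the patch: the smallest provider these
 * interfaces admit, a digest-only mechanism registered at module load.  All
 * demo_* names and the "CKM_DEMO_DIGEST" mechanism are hypothetical; the
 * aes.c changes below show a real provider.
 */
static int
demo_digest_atomic(crypto_mechanism_t *mech, crypto_data_t *in,
    crypto_data_t *out)
{
	(void) mech;
	(void) in;
	(void) out;
	return (0);		/* CRYPTO_SUCCESS */
}

static const crypto_mech_info_t demo_mech_tab[] = {
	{"CKM_DEMO_DIGEST", 0, CRYPTO_FG_DIGEST_ATOMIC},
};

static const crypto_digest_ops_t demo_digest_ops = {
	.digest_atomic = demo_digest_atomic,
};

static const crypto_ops_t demo_crypto_ops = {
	.co_digest_ops = &demo_digest_ops,
};

static const crypto_provider_info_t demo_prov_info = {
	"Demo Digest Provider",
	&demo_crypto_ops,
	sizeof (demo_mech_tab) / sizeof (crypto_mech_info_t),
	demo_mech_tab
};

static crypto_kcf_provider_handle_t demo_handle;

static int
demo_mod_init(void)
{
	return (crypto_register_provider(&demo_prov_info, &demo_handle));
}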
diff --git a/module/icp/io/aes.c b/module/icp/io/aes.c
index a4ef1716710e..a68a878b6ab2 100644
--- a/module/icp/io/aes.c
+++ b/module/icp/io/aes.c
@@ -1,987 +1,498 @@
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * AES provider for the Kernel Cryptographic Framework (KCF)
 */

#include
#include
#include
#include
#include
#include
#define	_AES_IMPL
#include
#include

/*
 * Mechanism info structure passed to KCF during registration.
 */
static const crypto_mech_info_t aes_mech_info_tab[] = {
	/* AES_CCM */
	{SUN_CKM_AES_CCM, AES_CCM_MECH_INFO_TYPE,
-	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
-	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC},
+	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC},
	/* AES_GCM */
	{SUN_CKM_AES_GCM, AES_GCM_MECH_INFO_TYPE,
-	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
-	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC},
+	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC},
};

-static int aes_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
-    crypto_key_t *, crypto_spi_ctx_template_t);
-static int aes_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
-    crypto_key_t *, crypto_spi_ctx_template_t);
-static int aes_common_init(crypto_ctx_t *, crypto_mechanism_t *,
-    crypto_key_t *, crypto_spi_ctx_template_t, boolean_t);
static int aes_common_init_ctx(aes_ctx_t *, crypto_spi_ctx_template_t *,
    crypto_mechanism_t *, crypto_key_t *, int, boolean_t);
-static int aes_encrypt_final(crypto_ctx_t *, crypto_data_t *);
-static int aes_decrypt_final(crypto_ctx_t *, crypto_data_t *);

-static int aes_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *);
-static int aes_encrypt_update(crypto_ctx_t *, crypto_data_t *,
-    crypto_data_t *);
static int aes_encrypt_atomic(crypto_mechanism_t *, crypto_key_t *,
    crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t);

-static int aes_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *);
-static int aes_decrypt_update(crypto_ctx_t *, crypto_data_t *,
-    crypto_data_t *);
static int aes_decrypt_atomic(crypto_mechanism_t *, crypto_key_t *,
    crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t);

static const crypto_cipher_ops_t aes_cipher_ops = {
-	.encrypt_init = aes_encrypt_init,
-	.encrypt = aes_encrypt,
-	.encrypt_update = aes_encrypt_update,
-	.encrypt_final = aes_encrypt_final,
	.encrypt_atomic = aes_encrypt_atomic,
-	.decrypt_init = aes_decrypt_init,
-	.decrypt = aes_decrypt,
-	.decrypt_update = aes_decrypt_update,
-	.decrypt_final = aes_decrypt_final,
	.decrypt_atomic = aes_decrypt_atomic
};

static int aes_create_ctx_template(crypto_mechanism_t *, crypto_key_t *,
    crypto_spi_ctx_template_t *, size_t *);
static int aes_free_context(crypto_ctx_t *);

static const crypto_ctx_ops_t aes_ctx_ops = {
	.create_ctx_template = aes_create_ctx_template,
	.free_context = aes_free_context
};

static const crypto_ops_t aes_crypto_ops = {
	NULL,
	&aes_cipher_ops,
	NULL,
	&aes_ctx_ops,
};

static const crypto_provider_info_t aes_prov_info = {
	"AES Software Provider",
	&aes_crypto_ops,
	sizeof (aes_mech_info_tab) / sizeof (crypto_mech_info_t),
	aes_mech_info_tab
};

static crypto_kcf_provider_handle_t aes_prov_handle = 0;

int
aes_mod_init(void)
{
	/* Determine the fastest available implementation. */
	aes_impl_init();
	gcm_impl_init();

	/* Register with KCF.  If the registration fails, remove the module.
	 */
	if (crypto_register_provider(&aes_prov_info, &aes_prov_handle))
		return (EACCES);

	return (0);
}

int
aes_mod_fini(void)
{
	/* Unregister from KCF if module is registered */
	if (aes_prov_handle != 0) {
		if (crypto_unregister_provider(aes_prov_handle))
			return (EBUSY);

		aes_prov_handle = 0;
	}

	return (0);
}

static int
aes_check_mech_param(crypto_mechanism_t *mechanism, aes_ctx_t **ctx)
{
	void *p = NULL;
	boolean_t param_required = B_TRUE;
	size_t param_len;
	void *(*alloc_fun)(int);
	int rv = CRYPTO_SUCCESS;

	switch (mechanism->cm_type) {
	case AES_CCM_MECH_INFO_TYPE:
		param_len = sizeof (CK_AES_CCM_PARAMS);
		alloc_fun = ccm_alloc_ctx;
		break;
	case AES_GCM_MECH_INFO_TYPE:
		param_len = sizeof (CK_AES_GCM_PARAMS);
		alloc_fun = gcm_alloc_ctx;
		break;
	default:
		__builtin_unreachable();
	}
	if (param_required && mechanism->cm_param != NULL &&
	    mechanism->cm_param_len != param_len) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
	if (ctx != NULL) {
		p = (alloc_fun)(KM_SLEEP);
		*ctx = p;
	}
	return (rv);
}

/*
 * Initialize key schedules for AES
 */
static int
init_keysched(crypto_key_t *key, void *newbie)
{
	if (key->ck_length < AES_MINBITS ||
	    key->ck_length > AES_MAXBITS) {
		return (CRYPTO_KEY_SIZE_RANGE);
	}

	/* key length must be either 128, 192, or 256 */
	if ((key->ck_length & 63) != 0)
		return (CRYPTO_KEY_SIZE_RANGE);

	aes_init_keysched(key->ck_data, key->ck_length, newbie);
	return (CRYPTO_SUCCESS);
}
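/*
 * Illustrative sketch, not part of the patch: the two checks in
 * init_keysched() accept exactly the AES key sizes, since 128, 192 and 256
 * are the only multiples of 64 between AES_MINBITS (128) and AES_MAXBITS
 * (256).  demo_valid_aes_keylen() is a hypothetical restatement.
 */
static boolean_t
demo_valid_aes_keylen(uint32_t bits)
{
	return (bits == 128 || bits == 192 || bits == 256);
}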
-static int
-aes_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
-    crypto_key_t *key, crypto_spi_ctx_template_t template)
-{
-	return (aes_common_init(ctx, mechanism, key, template, B_TRUE));
-}
-
-static int
-aes_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
-    crypto_key_t *key, crypto_spi_ctx_template_t template)
-{
-	return (aes_common_init(ctx, mechanism, key, template, B_FALSE));
-}
-
-
-
/*
 * KCF software provider encrypt entry points.
 */
-static int
-aes_common_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
-    crypto_key_t *key, crypto_spi_ctx_template_t template,
-    boolean_t is_encrypt_init)
-{
-	aes_ctx_t *aes_ctx;
-	int rv;
-
-	if ((rv = aes_check_mech_param(mechanism, &aes_ctx))
-	    != CRYPTO_SUCCESS)
-		return (rv);
-
-	rv = aes_common_init_ctx(aes_ctx, template, mechanism, key, KM_SLEEP,
-	    is_encrypt_init);
-	if (rv != CRYPTO_SUCCESS) {
-		crypto_free_mode_ctx(aes_ctx);
-		return (rv);
-	}
-
-	ctx->cc_provider_private = aes_ctx;
-
-	return (CRYPTO_SUCCESS);
-}
-
-static int
-aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
-    crypto_data_t *ciphertext)
-{
-	int ret = CRYPTO_FAILED;
-
-	aes_ctx_t *aes_ctx;
-	size_t saved_length, saved_offset, length_needed;
-
-	ASSERT(ctx->cc_provider_private != NULL);
-	aes_ctx = ctx->cc_provider_private;
-
-	ASSERT(ciphertext != NULL);
-
-	/*
-	 * We need to just return the length needed to store the output.
-	 * We should not destroy the context for the following case.
-	 */
-	switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE)) {
-	case CCM_MODE:
-		length_needed = plaintext->cd_length + aes_ctx->ac_mac_len;
-		break;
-	case GCM_MODE:
-		length_needed = plaintext->cd_length + aes_ctx->ac_tag_len;
-		break;
-	default:
-		__builtin_unreachable();
-	}
-
-	if (ciphertext->cd_length < length_needed) {
-		ciphertext->cd_length = length_needed;
-		return (CRYPTO_BUFFER_TOO_SMALL);
-	}
-
-	saved_length = ciphertext->cd_length;
-	saved_offset = ciphertext->cd_offset;
-
-	/*
-	 * Do an update on the specified input data.
-	 */
-	ret = aes_encrypt_update(ctx, plaintext, ciphertext);
-	if (ret != CRYPTO_SUCCESS) {
-		return (ret);
-	}
-
-	/*
-	 * For CCM mode, aes_ccm_encrypt_final() will take care of any
-	 * left-over unprocessed data, and compute the MAC
-	 */
-	if (aes_ctx->ac_flags & CCM_MODE) {
-		/*
-		 * ccm_encrypt_final() will compute the MAC and append
-		 * it to existing ciphertext. So, need to adjust the left over
-		 * length value accordingly
-		 */
-
-		/* order of following 2 lines MUST not be reversed */
-		ciphertext->cd_offset = ciphertext->cd_length;
-		ciphertext->cd_length = saved_length - ciphertext->cd_length;
-		ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, ciphertext,
-		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
-		if (ret != CRYPTO_SUCCESS) {
-			return (ret);
-		}
-
-		if (plaintext != ciphertext) {
-			ciphertext->cd_length =
-			    ciphertext->cd_offset - saved_offset;
-		}
-		ciphertext->cd_offset = saved_offset;
-	} else if (aes_ctx->ac_flags & GCM_MODE) {
-		/*
-		 * gcm_encrypt_final() will compute the MAC and append
-		 * it to existing ciphertext. So, need to adjust the left over
-		 * length value accordingly
-		 */
-
-		/* order of following 2 lines MUST not be reversed */
-		ciphertext->cd_offset = ciphertext->cd_length;
-		ciphertext->cd_length = saved_length - ciphertext->cd_length;
-		ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, ciphertext,
-		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
-		    aes_xor_block);
-		if (ret != CRYPTO_SUCCESS) {
-			return (ret);
-		}
-
-		if (plaintext != ciphertext) {
-			ciphertext->cd_length =
-			    ciphertext->cd_offset - saved_offset;
-		}
-		ciphertext->cd_offset = saved_offset;
-	}
-
-	ASSERT(aes_ctx->ac_remainder_len == 0);
-	(void) aes_free_context(ctx);
-
-	return (ret);
-}
-
-
-static int
-aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
-    crypto_data_t *plaintext)
-{
-	int ret = CRYPTO_FAILED;
-
-	aes_ctx_t *aes_ctx;
-	off_t saved_offset;
-	size_t saved_length, length_needed;
-
-	ASSERT(ctx->cc_provider_private != NULL);
-	aes_ctx = ctx->cc_provider_private;
-
-	ASSERT(plaintext != NULL);
-
-	/*
-	 * Return length needed to store the output.
-	 * Do not destroy context when plaintext buffer is too small.
-	 *
-	 * CCM:  plaintext is MAC len smaller than cipher text
-	 * GCM:  plaintext is TAG len smaller than cipher text
-	 */
-	switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE)) {
-	case CCM_MODE:
-		length_needed = aes_ctx->ac_processed_data_len;
-		break;
-	case GCM_MODE:
-		length_needed = ciphertext->cd_length - aes_ctx->ac_tag_len;
-		break;
-	default:
-		__builtin_unreachable();
-	}
-
-	if (plaintext->cd_length < length_needed) {
-		plaintext->cd_length = length_needed;
-		return (CRYPTO_BUFFER_TOO_SMALL);
-	}
-
-	saved_offset = plaintext->cd_offset;
-	saved_length = plaintext->cd_length;
-
-	/*
-	 * Do an update on the specified input data.
-	 */
-	ret = aes_decrypt_update(ctx, ciphertext, plaintext);
-	if (ret != CRYPTO_SUCCESS) {
-		goto cleanup;
-	}
-
-	if (aes_ctx->ac_flags & CCM_MODE) {
-		ASSERT(aes_ctx->ac_processed_data_len == aes_ctx->ac_data_len);
-		ASSERT(aes_ctx->ac_processed_mac_len == aes_ctx->ac_mac_len);
-
-		/* order of following 2 lines MUST not be reversed */
-		plaintext->cd_offset = plaintext->cd_length;
-		plaintext->cd_length = saved_length - plaintext->cd_length;
-
-		ret = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, plaintext,
-		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
-		    aes_xor_block);
-		if (ret == CRYPTO_SUCCESS) {
-			if (plaintext != ciphertext) {
-				plaintext->cd_length =
-				    plaintext->cd_offset - saved_offset;
-			}
-		} else {
-			plaintext->cd_length = saved_length;
-		}
-
-		plaintext->cd_offset = saved_offset;
-	} else if (aes_ctx->ac_flags & GCM_MODE) {
-		/* order of following 2 lines MUST not be reversed */
-		plaintext->cd_offset = plaintext->cd_length;
-		plaintext->cd_length = saved_length - plaintext->cd_length;
-
-		ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, plaintext,
-		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
-		if (ret == CRYPTO_SUCCESS) {
-			if (plaintext != ciphertext) {
-				plaintext->cd_length =
-				    plaintext->cd_offset - saved_offset;
-			}
-		} else {
-			plaintext->cd_length = saved_length;
-		}
-
-		plaintext->cd_offset = saved_offset;
-	}
-
-	ASSERT(aes_ctx->ac_remainder_len == 0);
-
-cleanup:
-	(void) aes_free_context(ctx);
-
-	return (ret);
-}
-
-
-static int
-aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
-    crypto_data_t *ciphertext)
-{
-	off_t saved_offset;
-	size_t saved_length, out_len;
-	int ret = CRYPTO_SUCCESS;
-	aes_ctx_t *aes_ctx;
-
-	ASSERT(ctx->cc_provider_private != NULL);
-	aes_ctx = ctx->cc_provider_private;
-
-	ASSERT(ciphertext != NULL);
-
-	/* compute number of bytes that will hold the ciphertext */
-	out_len = aes_ctx->ac_remainder_len;
-	out_len += plaintext->cd_length;
-	out_len &= ~(AES_BLOCK_LEN - 1);
-
-	/* return length needed to store the output */
-	if (ciphertext->cd_length < out_len) {
-		ciphertext->cd_length = out_len;
-		return (CRYPTO_BUFFER_TOO_SMALL);
-	}
-
-	saved_offset = ciphertext->cd_offset;
-	saved_length = ciphertext->cd_length;
-
-	/*
-	 * Do the AES update on the specified input data.
-	 */
-	switch (plaintext->cd_format) {
-	case CRYPTO_DATA_RAW:
-		ret = crypto_update_iov(ctx->cc_provider_private,
-		    plaintext, ciphertext, aes_encrypt_contiguous_blocks);
-		break;
-	case CRYPTO_DATA_UIO:
-		ret = crypto_update_uio(ctx->cc_provider_private,
-		    plaintext, ciphertext, aes_encrypt_contiguous_blocks);
-		break;
-	default:
-		ret = CRYPTO_ARGUMENTS_BAD;
-	}
-
-	if (ret == CRYPTO_SUCCESS) {
-		if (plaintext != ciphertext)
-			ciphertext->cd_length =
-			    ciphertext->cd_offset - saved_offset;
-	} else {
-		ciphertext->cd_length = saved_length;
-	}
-	ciphertext->cd_offset = saved_offset;
-
-	return (ret);
-}
-
-
-static int
-aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
-    crypto_data_t *plaintext)
-{
-	off_t saved_offset;
-	size_t saved_length;
-	int ret = CRYPTO_SUCCESS;
-
-	ASSERT(ctx->cc_provider_private != NULL);
-
-	ASSERT(plaintext != NULL);
-
-	saved_offset = plaintext->cd_offset;
-	saved_length = plaintext->cd_length;
-
-	/*
-	 * Do the AES update on the specified input data.
-	 */
-	switch (ciphertext->cd_format) {
-	case CRYPTO_DATA_RAW:
-		ret = crypto_update_iov(ctx->cc_provider_private,
-		    ciphertext, plaintext, aes_decrypt_contiguous_blocks);
-		break;
-	case CRYPTO_DATA_UIO:
-		ret = crypto_update_uio(ctx->cc_provider_private,
-		    ciphertext, plaintext, aes_decrypt_contiguous_blocks);
-		break;
-	default:
-		ret = CRYPTO_ARGUMENTS_BAD;
-	}
-
-	if (ret == CRYPTO_SUCCESS) {
-		if (ciphertext != plaintext)
-			plaintext->cd_length =
-			    plaintext->cd_offset - saved_offset;
-	} else {
-		plaintext->cd_length = saved_length;
-	}
-	plaintext->cd_offset = saved_offset;
-
-
-	return (ret);
-}
-
-static int
-aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data)
-{
-	aes_ctx_t *aes_ctx;
-	int ret;
-
-	ASSERT(ctx->cc_provider_private != NULL);
-	aes_ctx = ctx->cc_provider_private;
-
-	if (data->cd_format != CRYPTO_DATA_RAW &&
-	    data->cd_format != CRYPTO_DATA_UIO) {
-		return (CRYPTO_ARGUMENTS_BAD);
-	}
-
-	if (aes_ctx->ac_flags & CCM_MODE) {
-		ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, data,
-		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
-		if (ret != CRYPTO_SUCCESS) {
-			return (ret);
-		}
-	} else if (aes_ctx->ac_flags & GCM_MODE) {
-		size_t saved_offset = data->cd_offset;
-
-		ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, data,
-		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
-		    aes_xor_block);
-		if (ret != CRYPTO_SUCCESS) {
-			return (ret);
-		}
-		data->cd_length = data->cd_offset - saved_offset;
-		data->cd_offset = saved_offset;
-	}
-
-	(void) aes_free_context(ctx);
-
-	return (CRYPTO_SUCCESS);
-}
-
-static int
-aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data)
-{
-	aes_ctx_t *aes_ctx;
-	int ret;
-	off_t saved_offset;
-	size_t saved_length;
-
-	ASSERT(ctx->cc_provider_private != NULL);
-	aes_ctx = ctx->cc_provider_private;
-
-	if (data->cd_format != CRYPTO_DATA_RAW &&
-	    data->cd_format != CRYPTO_DATA_UIO) {
-		return (CRYPTO_ARGUMENTS_BAD);
-	}
-
-	/*
-	 * There must be no unprocessed ciphertext.
-	 * This happens if the length of the last ciphertext is
-	 * not a multiple of the AES block length.
-	 */
-	if (aes_ctx->ac_remainder_len > 0)
-		return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
-
-	if (aes_ctx->ac_flags & CCM_MODE) {
-		/*
-		 * This is where all the plaintext is returned, make sure
-		 * the plaintext buffer is big enough
-		 */
-		size_t pt_len = aes_ctx->ac_data_len;
-		if (data->cd_length < pt_len) {
-			data->cd_length = pt_len;
-			return (CRYPTO_BUFFER_TOO_SMALL);
-		}
-
-		ASSERT(aes_ctx->ac_processed_data_len == pt_len);
-		ASSERT(aes_ctx->ac_processed_mac_len == aes_ctx->ac_mac_len);
-		saved_offset = data->cd_offset;
-		saved_length = data->cd_length;
-		ret = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, data,
-		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
-		    aes_xor_block);
-		if (ret == CRYPTO_SUCCESS) {
-			data->cd_length = data->cd_offset - saved_offset;
-		} else {
-			data->cd_length = saved_length;
-		}
-
-		data->cd_offset = saved_offset;
-		if (ret != CRYPTO_SUCCESS) {
-			return (ret);
-		}
-	} else if (aes_ctx->ac_flags & GCM_MODE) {
-		/*
-		 * This is where all the plaintext is returned, make sure
-		 * the plaintext buffer is big enough
-		 */
-		gcm_ctx_t *ctx = (gcm_ctx_t *)aes_ctx;
-		size_t pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
-
-		if (data->cd_length < pt_len) {
-			data->cd_length = pt_len;
-			return (CRYPTO_BUFFER_TOO_SMALL);
-		}
-
-		saved_offset = data->cd_offset;
-		saved_length = data->cd_length;
-		ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, data,
-		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
-		if (ret == CRYPTO_SUCCESS) {
-			data->cd_length = data->cd_offset - saved_offset;
-		} else {
-			data->cd_length = saved_length;
-		}
-
-		data->cd_offset = saved_offset;
-		if (ret != CRYPTO_SUCCESS) {
-			return (ret);
-		}
-	}
-
-
-	(void) aes_free_context(ctx);
-
-	return (CRYPTO_SUCCESS);
-}
-
static int
aes_encrypt_atomic(crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
    crypto_spi_ctx_template_t template)
{
	aes_ctx_t aes_ctx;
	off_t saved_offset;
	size_t saved_length;
	size_t length_needed;
	int ret;

	memset(&aes_ctx, 0, sizeof (aes_ctx_t));

	ASSERT(ciphertext != NULL);

	if ((ret = aes_check_mech_param(mechanism, NULL)) != CRYPTO_SUCCESS)
		return (ret);

	ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
	    KM_SLEEP, B_TRUE);
	if (ret != CRYPTO_SUCCESS)
		return (ret);

	switch (mechanism->cm_type) {
	case AES_CCM_MECH_INFO_TYPE:
		length_needed = plaintext->cd_length + aes_ctx.ac_mac_len;
		break;
	case AES_GCM_MECH_INFO_TYPE:
		length_needed = plaintext->cd_length + aes_ctx.ac_tag_len;
		break;
	default:
		__builtin_unreachable();
	}

	/* return size of buffer needed to store output */
	if (ciphertext->cd_length < length_needed) {
		ciphertext->cd_length = length_needed;
		ret = CRYPTO_BUFFER_TOO_SMALL;
		goto out;
	}

	saved_offset = ciphertext->cd_offset;
	saved_length = ciphertext->cd_length;

	/*
	 * Do an update on the specified input data.
	 */
	switch (plaintext->cd_format) {
	case CRYPTO_DATA_RAW:
		ret = crypto_update_iov(&aes_ctx, plaintext, ciphertext,
		    aes_encrypt_contiguous_blocks);
		break;
	case CRYPTO_DATA_UIO:
		ret = crypto_update_uio(&aes_ctx, plaintext, ciphertext,
		    aes_encrypt_contiguous_blocks);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
			ret = ccm_encrypt_final((ccm_ctx_t *)&aes_ctx,
			    ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
			    aes_xor_block);
			if (ret != CRYPTO_SUCCESS)
				goto out;
			ASSERT(aes_ctx.ac_remainder_len == 0);
		} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) {
			ret = gcm_encrypt_final((gcm_ctx_t *)&aes_ctx,
			    ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
			    aes_copy_block, aes_xor_block);
			if (ret != CRYPTO_SUCCESS)
				goto out;
			ASSERT(aes_ctx.ac_remainder_len == 0);
		} else {
			ASSERT(aes_ctx.ac_remainder_len == 0);
		}

		if (plaintext != ciphertext) {
			ciphertext->cd_length =
			    ciphertext->cd_offset - saved_offset;
		}
	} else {
		ciphertext->cd_length = saved_length;
	}
	ciphertext->cd_offset = saved_offset;

out:
	if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
		memset(aes_ctx.ac_keysched, 0, aes_ctx.ac_keysched_len);
		kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
	}
	if (aes_ctx.ac_flags & GCM_MODE) {
		gcm_clear_ctx((gcm_ctx_t *)&aes_ctx);
	}
	return (ret);
}
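/*
 * Illustrative sketch, not part of the patch: a kernel consumer reaching
 * aes_encrypt_atomic() above goes through the ICP consumer API.  The
 * crypto_encrypt() signature is assumed here, and demo_gcm_encrypt() is a
 * hypothetical wrapper.
 */
static int
demo_gcm_encrypt(crypto_key_t *key, CK_AES_GCM_PARAMS *gcm_params,
    crypto_data_t *pt, crypto_data_t *ct)
{
	crypto_mechanism_t mech;

	mech.cm_type = crypto_mech2id(SUN_CKM_AES_GCM);
	mech.cm_param = (char *)gcm_params;
	mech.cm_param_len = sizeof (*gcm_params);

	/* NULL template: the provider builds a key schedule for this call */
	return (crypto_encrypt(&mech, pt, key, NULL, ct));
}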
static int
aes_decrypt_atomic(crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
    crypto_spi_ctx_template_t template)
{
	aes_ctx_t aes_ctx;
	off_t saved_offset;
	size_t saved_length;
	size_t length_needed;
	int ret;

	memset(&aes_ctx, 0, sizeof (aes_ctx_t));

	ASSERT(plaintext != NULL);

	if ((ret = aes_check_mech_param(mechanism, NULL)) != CRYPTO_SUCCESS)
		return (ret);

	ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
	    KM_SLEEP, B_FALSE);
	if (ret != CRYPTO_SUCCESS)
		return (ret);

	switch (mechanism->cm_type) {
	case AES_CCM_MECH_INFO_TYPE:
		length_needed = aes_ctx.ac_data_len;
		break;
	case AES_GCM_MECH_INFO_TYPE:
		length_needed = ciphertext->cd_length - aes_ctx.ac_tag_len;
		break;
	default:
		__builtin_unreachable();
	}

	/* return size of buffer needed to store output */
	if (plaintext->cd_length < length_needed) {
		plaintext->cd_length = length_needed;
		ret = CRYPTO_BUFFER_TOO_SMALL;
		goto out;
	}

	saved_offset = plaintext->cd_offset;
	saved_length = plaintext->cd_length;

	/*
	 * Do an update on the specified input data.
	 */
	switch (ciphertext->cd_format) {
	case CRYPTO_DATA_RAW:
		ret = crypto_update_iov(&aes_ctx, ciphertext, plaintext,
		    aes_decrypt_contiguous_blocks);
		break;
	case CRYPTO_DATA_UIO:
		ret = crypto_update_uio(&aes_ctx, ciphertext, plaintext,
		    aes_decrypt_contiguous_blocks);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
			ASSERT(aes_ctx.ac_processed_data_len ==
			    aes_ctx.ac_data_len);
			ASSERT(aes_ctx.ac_processed_mac_len ==
			    aes_ctx.ac_mac_len);
			ret = ccm_decrypt_final((ccm_ctx_t *)&aes_ctx,
			    plaintext, AES_BLOCK_LEN, aes_encrypt_block,
			    aes_copy_block, aes_xor_block);
			ASSERT(aes_ctx.ac_remainder_len == 0);
			if ((ret == CRYPTO_SUCCESS) &&
			    (ciphertext != plaintext)) {
				plaintext->cd_length =
				    plaintext->cd_offset - saved_offset;
			} else {
				plaintext->cd_length = saved_length;
			}
		} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) {
			ret = gcm_decrypt_final((gcm_ctx_t *)&aes_ctx,
			    plaintext, AES_BLOCK_LEN, aes_encrypt_block,
			    aes_xor_block);
			ASSERT(aes_ctx.ac_remainder_len == 0);
			if ((ret == CRYPTO_SUCCESS) &&
			    (ciphertext != plaintext)) {
				plaintext->cd_length =
				    plaintext->cd_offset - saved_offset;
			} else {
				plaintext->cd_length = saved_length;
			}
		} else
			__builtin_unreachable();
	} else {
		plaintext->cd_length = saved_length;
	}
	plaintext->cd_offset = saved_offset;

out:
	if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
		memset(aes_ctx.ac_keysched, 0, aes_ctx.ac_keysched_len);
		kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
	}

	if (aes_ctx.ac_flags & CCM_MODE) {
		if (aes_ctx.ac_pt_buf != NULL) {
			vmem_free(aes_ctx.ac_pt_buf, aes_ctx.ac_data_len);
		}
	} else if (aes_ctx.ac_flags & GCM_MODE) {
		gcm_clear_ctx((gcm_ctx_t *)&aes_ctx);
	}
	return (ret);
}

/*
 * KCF software provider context template entry points.
 */
static int
aes_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key,
    crypto_spi_ctx_template_t *tmpl, size_t *tmpl_size)
{
	void *keysched;
	size_t size;
	int rv;

	if (mechanism->cm_type != AES_CCM_MECH_INFO_TYPE &&
	    mechanism->cm_type != AES_GCM_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	if ((keysched = aes_alloc_keysched(&size, KM_SLEEP)) == NULL) {
		return (CRYPTO_HOST_MEMORY);
	}

	/*
	 * Initialize key schedule.  Key length information is stored
	 * in the key.
	 */
	if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
		memset(keysched, 0, size);
		kmem_free(keysched, size);
		return (rv);
	}

	*tmpl = keysched;
	*tmpl_size = size;

	return (CRYPTO_SUCCESS);
}

static int
aes_free_context(crypto_ctx_t *ctx)
{
	aes_ctx_t *aes_ctx = ctx->cc_provider_private;

	if (aes_ctx != NULL) {
		if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
			ASSERT(aes_ctx->ac_keysched_len != 0);
			memset(aes_ctx->ac_keysched, 0,
			    aes_ctx->ac_keysched_len);
			kmem_free(aes_ctx->ac_keysched,
			    aes_ctx->ac_keysched_len);
		}
		crypto_free_mode_ctx(aes_ctx);
		ctx->cc_provider_private = NULL;
	}

	return (CRYPTO_SUCCESS);
}

static int
aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
    crypto_mechanism_t *mechanism, crypto_key_t *key, int kmflag,
    boolean_t is_encrypt_init)
{
	int rv = CRYPTO_SUCCESS;
	void *keysched;
	size_t size = 0;

	if (template == NULL) {
		if ((keysched = aes_alloc_keysched(&size, kmflag)) == NULL)
			return (CRYPTO_HOST_MEMORY);
		/*
		 * Initialize key schedule.
		 * Key length is stored in the key.
		 */
		if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
			kmem_free(keysched, size);
			return (rv);
		}

		aes_ctx->ac_flags |= PROVIDER_OWNS_KEY_SCHEDULE;
		aes_ctx->ac_keysched_len = size;
	} else {
		keysched = template;
	}
	aes_ctx->ac_keysched = keysched;

	switch (mechanism->cm_type) {
	case AES_CCM_MECH_INFO_TYPE:
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (CK_AES_CCM_PARAMS)) {
			return (CRYPTO_MECHANISM_PARAM_INVALID);
		}
		rv = ccm_init_ctx((ccm_ctx_t *)aes_ctx, mechanism->cm_param,
		    kmflag, is_encrypt_init, AES_BLOCK_LEN, aes_encrypt_block,
		    aes_xor_block);
		break;
	case AES_GCM_MECH_INFO_TYPE:
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (CK_AES_GCM_PARAMS)) {
			return (CRYPTO_MECHANISM_PARAM_INVALID);
		}
		rv = gcm_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		break;
	}

	if (rv != CRYPTO_SUCCESS) {
		if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
			memset(keysched, 0, size);
			kmem_free(keysched, size);
		}
	}

	return (rv);
}
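/*
 * Illustrative sketch, not part of the patch: the CK_AES_GCM_PARAMS block
 * that aes_common_init_ctx() expects in mechanism->cm_param.  Field names
 * follow PKCS#11; demo_fill_gcm_params() and the 96-bit IV / 128-bit tag
 * choice are hypothetical but conventional.
 */
static void
demo_fill_gcm_params(CK_AES_GCM_PARAMS *p, uint8_t *iv, uint8_t *aad,
    size_t aad_len)
{
	p->pIv = iv;
	p->ulIvLen = 12;		/* 96-bit IV */
	p->ulIvBits = 96;
	p->pAAD = aad;
	p->ulAADLen = aad_len;
	p->ulTagBits = 128;		/* full 16-byte tag */
}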