diff --git a/include/sys/skein.h b/include/sys/skein.h index 2f649d6b269a..3359d48af795 100644 --- a/include/sys/skein.h +++ b/include/sys/skein.h @@ -1,183 +1,174 @@ /* * Interface declarations for Skein hashing. * Source code author: Doug Whiting, 2008. * This algorithm and source code is released to the public domain. * * The following compile-time switches may be defined to control some * tradeoffs between speed, code size, error checking, and security. * * The "default" note explains what happens when the switch is not defined. * * SKEIN_DEBUG -- make callouts from inside Skein code * to examine/display intermediate values. * [default: no callouts (no overhead)] * * SKEIN_ERR_CHECK -- how error checking is handled inside Skein * code. If not defined, most error checking * is disabled (for performance). Otherwise, * the switch value is interpreted as: * 0: use assert() to flag errors * 1: return SKEIN_FAIL to flag errors */ /* Copyright 2013 Doug Whiting. This code is released to the public domain. */ #ifndef _SYS_SKEIN_H_ #define _SYS_SKEIN_H_ #ifdef _KERNEL #include /* get size_t definition */ #else #include #include #endif #ifdef __cplusplus extern "C" { #endif enum { SKEIN_SUCCESS = 0, /* return codes from Skein calls */ SKEIN_FAIL = 1, SKEIN_BAD_HASHLEN = 2 }; #define SKEIN_MODIFIER_WORDS (2) /* number of modifier (tweak) words */ #define SKEIN_256_STATE_WORDS (4) #define SKEIN_512_STATE_WORDS (8) #define SKEIN1024_STATE_WORDS (16) #define SKEIN_MAX_STATE_WORDS (16) #define SKEIN_256_STATE_BYTES (8 * SKEIN_256_STATE_WORDS) #define SKEIN_512_STATE_BYTES (8 * SKEIN_512_STATE_WORDS) #define SKEIN1024_STATE_BYTES (8 * SKEIN1024_STATE_WORDS) #define SKEIN_256_STATE_BITS (64 * SKEIN_256_STATE_WORDS) #define SKEIN_512_STATE_BITS (64 * SKEIN_512_STATE_WORDS) #define SKEIN1024_STATE_BITS (64 * SKEIN1024_STATE_WORDS) #define SKEIN_256_BLOCK_BYTES (8 * SKEIN_256_STATE_WORDS) #define SKEIN_512_BLOCK_BYTES (8 * SKEIN_512_STATE_WORDS) #define SKEIN1024_BLOCK_BYTES (8 * SKEIN1024_STATE_WORDS) typedef struct { size_t hashBitLen; /* size of hash result, in bits */ size_t bCnt; /* current byte count in buffer b[] */ /* tweak words: T[0]=byte cnt, T[1]=flags */ uint64_t T[SKEIN_MODIFIER_WORDS]; } Skein_Ctxt_Hdr_t; typedef struct { /* 256-bit Skein hash context structure */ Skein_Ctxt_Hdr_t h; /* common header context variables */ uint64_t X[SKEIN_256_STATE_WORDS]; /* chaining variables */ /* partial block buffer (8-byte aligned) */ uint8_t b[SKEIN_256_BLOCK_BYTES]; } Skein_256_Ctxt_t; typedef struct { /* 512-bit Skein hash context structure */ Skein_Ctxt_Hdr_t h; /* common header context variables */ uint64_t X[SKEIN_512_STATE_WORDS]; /* chaining variables */ /* partial block buffer (8-byte aligned) */ uint8_t b[SKEIN_512_BLOCK_BYTES]; } Skein_512_Ctxt_t; typedef struct { /* 1024-bit Skein hash context structure */ Skein_Ctxt_Hdr_t h; /* common header context variables */ uint64_t X[SKEIN1024_STATE_WORDS]; /* chaining variables */ /* partial block buffer (8-byte aligned) */ uint8_t b[SKEIN1024_BLOCK_BYTES]; } Skein1024_Ctxt_t; /* Skein APIs for (incremental) "straight hashing" */ int Skein_256_Init(Skein_256_Ctxt_t *ctx, size_t hashBitLen); int Skein_512_Init(Skein_512_Ctxt_t *ctx, size_t hashBitLen); int Skein1024_Init(Skein1024_Ctxt_t *ctx, size_t hashBitLen); int Skein_256_Update(Skein_256_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt); int Skein_512_Update(Skein_512_Ctxt_t *ctx, const uint8_t *msg, size_t msgByteCnt); int Skein1024_Update(Skein1024_Ctxt_t *ctx, const uint8_t *msg, size_t 
msgByteCnt); int Skein_256_Final(Skein_256_Ctxt_t *ctx, uint8_t *hashVal); int Skein_512_Final(Skein_512_Ctxt_t *ctx, uint8_t *hashVal); int Skein1024_Final(Skein1024_Ctxt_t *ctx, uint8_t *hashVal); /* * Skein APIs for "extended" initialization: MAC keys, tree hashing. * After an InitExt() call, just use Update/Final calls as with Init(). * * Notes: Same parameters as _Init() calls, plus treeInfo/key/keyBytes. * When keyBytes == 0 and treeInfo == SKEIN_SEQUENTIAL, * the results of InitExt() are identical to calling Init(). * The function Init() may be called once to "precompute" the IV for * a given hashBitLen value, then by saving a copy of the context * the IV computation may be avoided in later calls. * Similarly, the function InitExt() may be called once per MAC key * to precompute the MAC IV, then a copy of the context saved and * reused for each new MAC computation. */ int Skein_256_InitExt(Skein_256_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo, const uint8_t *key, size_t keyBytes); int Skein_512_InitExt(Skein_512_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo, const uint8_t *key, size_t keyBytes); int Skein1024_InitExt(Skein1024_Ctxt_t *ctx, size_t hashBitLen, uint64_t treeInfo, const uint8_t *key, size_t keyBytes); /* * Skein APIs for MAC and tree hash: * Final_Pad: pad, do final block, but no OUTPUT type * Output: do just the output stage */ int Skein_256_Final_Pad(Skein_256_Ctxt_t *ctx, uint8_t *hashVal); int Skein_512_Final_Pad(Skein_512_Ctxt_t *ctx, uint8_t *hashVal); int Skein1024_Final_Pad(Skein1024_Ctxt_t *ctx, uint8_t *hashVal); #ifndef SKEIN_TREE_HASH #define SKEIN_TREE_HASH (1) #endif #if SKEIN_TREE_HASH int Skein_256_Output(Skein_256_Ctxt_t *ctx, uint8_t *hashVal); int Skein_512_Output(Skein_512_Ctxt_t *ctx, uint8_t *hashVal); int Skein1024_Output(Skein1024_Ctxt_t *ctx, uint8_t *hashVal); #endif /* * When you initialize a Skein KCF hashing method you can pass this param * structure in cm_param to fine-tune the algorithm's defaults. */ typedef struct skein_param { size_t sp_digest_bitlen; /* length of digest in bits */ } skein_param_t; /* Module definitions */ #ifdef SKEIN_MODULE_IMPL -#define CKM_SKEIN_256 "CKM_SKEIN_256" -#define CKM_SKEIN_512 "CKM_SKEIN_512" -#define CKM_SKEIN1024 "CKM_SKEIN1024" #define CKM_SKEIN_256_MAC "CKM_SKEIN_256_MAC" #define CKM_SKEIN_512_MAC "CKM_SKEIN_512_MAC" #define CKM_SKEIN1024_MAC "CKM_SKEIN1024_MAC" typedef enum skein_mech_type { - SKEIN_256_MECH_INFO_TYPE, - SKEIN_512_MECH_INFO_TYPE, - SKEIN1024_MECH_INFO_TYPE, SKEIN_256_MAC_MECH_INFO_TYPE, SKEIN_512_MAC_MECH_INFO_TYPE, SKEIN1024_MAC_MECH_INFO_TYPE } skein_mech_type_t; -#define VALID_SKEIN_DIGEST_MECH(__mech) \ - ((int)(__mech) >= SKEIN_256_MECH_INFO_TYPE && \ - (__mech) <= SKEIN1024_MECH_INFO_TYPE) #define VALID_SKEIN_MAC_MECH(__mech) \ ((int)(__mech) >= SKEIN_256_MAC_MECH_INFO_TYPE && \ (__mech) <= SKEIN1024_MAC_MECH_INFO_TYPE) #endif /* SKEIN_MODULE_IMPL */ #ifdef __cplusplus } #endif #endif /* _SYS_SKEIN_H_ */
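As a usage illustration of the incremental Init/Update/Final API declared above, a minimal userland sketch (assuming the Skein implementation from this tree is linked in; the message and buffer names are illustrative, not part of the header):

#include <stdio.h>
#include <sys/skein.h>

int
main(void)
{
	Skein_512_Ctxt_t ctx;
	uint8_t hash[SKEIN_512_BLOCK_BYTES];	/* 64 bytes for a 512-bit digest */
	const uint8_t msg[] = "abc";

	/* Init once, Update any number of times on successive fragments, then Final. */
	if (Skein_512_Init(&ctx, 512) != SKEIN_SUCCESS ||
	    Skein_512_Update(&ctx, msg, sizeof (msg) - 1) != SKEIN_SUCCESS ||
	    Skein_512_Final(&ctx, hash) != SKEIN_SUCCESS)
		return (1);

	for (size_t i = 0; i < sizeof (hash); i++)
		printf("%02x", hash[i]);
	printf("\n");
	return (0);
}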
diff --git a/module/icp/core/kcf_mech_tabs.c b/module/icp/core/kcf_mech_tabs.c index b6e693769804..a1e95847d066 100644 --- a/module/icp/core/kcf_mech_tabs.c +++ b/module/icp/core/kcf_mech_tabs.c @@ -1,434 +1,429 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2008 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ #include #include #include #include /* Cryptographic mechanisms tables and their access functions */ /* * Internal numbers assigned to mechanisms are coded as follows: * * +----------------+----------------+ * | mech. class | mech. index | * <--- 32-bits --->+<--- 32-bits ---> * * the mech_class identifies the table the mechanism belongs to. * mech_index is the index for that mechanism in the table. * A mechanism belongs to exactly 1 table. * The tables are: - * . digest_mechs_tab[] for the msg digest mechs. * . cipher_mechs_tab[] for encrypt/decrypt and wrap/unwrap mechs. * . mac_mechs_tab[] for MAC mechs. * . sign_mechs_tab[] for sign & verify mechs. * . keyops_mechs_tab[] for key/key pair generation, and key derivation. * . misc_mechs_tab[] for mechs that don't belong to any of the above. * * There are no holes in the tables. */ /* * Locking conventions: * -------------------- * A mutex is associated with every entry of the tables. * The mutex is acquired whenever the entry is accessed for * 1) retrieving the mech_id (comparing the mech name) * 2) finding a provider for an xxx_init() or atomic operation. * 3) altering the mechs entry to add or remove a provider. * * In 2), after a provider is chosen, its prov_desc is held and the * entry's mutex must be dropped. The provider's working function (SPI) is * called outside the mech_entry's mutex. * * The number of providers for a particular mechanism is not expected to be * large enough to justify the cost of using rwlocks, so the per-mechanism * entry mutex won't be very *hot*. * */ /* Mechanisms tables */ /* RFE 4687834 Will deal with the extensibility of these tables later */ -static kcf_mech_entry_t kcf_digest_mechs_tab[KCF_MAXDIGEST]; static kcf_mech_entry_t kcf_cipher_mechs_tab[KCF_MAXCIPHER]; static kcf_mech_entry_t kcf_mac_mechs_tab[KCF_MAXMAC]; const kcf_mech_entry_tab_t kcf_mech_tabs_tab[KCF_LAST_OPSCLASS + 1] = { {0, NULL}, /* No class zero */ - {KCF_MAXDIGEST, kcf_digest_mechs_tab}, {KCF_MAXCIPHER, kcf_cipher_mechs_tab}, {KCF_MAXMAC, kcf_mac_mechs_tab}, }; static avl_tree_t kcf_mech_hash; static int kcf_mech_hash_compar(const void *lhs, const void *rhs) { const kcf_mech_entry_t *l = lhs, *r = rhs; int cmp = strncmp(l->me_name, r->me_name, CRYPTO_MAX_MECH_NAME); return ((0 < cmp) - (cmp < 0)); } void kcf_destroy_mech_tabs(void) { for (void *cookie = NULL; avl_destroy_nodes(&kcf_mech_hash, &cookie); ) ; avl_destroy(&kcf_mech_hash); } /* * kcf_init_mech_tabs() * * Called by the misc/kcf's _init() routine to initialize the tables * of mech_entry's. */ void kcf_init_mech_tabs(void) { avl_create(&kcf_mech_hash, kcf_mech_hash_compar, sizeof (kcf_mech_entry_t), offsetof(kcf_mech_entry_t, me_node)); } /* * kcf_create_mech_entry() * * Arguments: * . The class of mechanism. * . the name of the new mechanism. * * Description: * Creates a new mech_entry for a mechanism not yet known to the * framework.
* This routine is called by kcf_add_mech_provider, which is * in turn invoked for each mechanism supported by a provider. * The 'class' argument depends on the crypto_func_group_t bitmask * in the registering provider's mech_info struct for this mechanism. * When there is ambiguity in the mapping between the crypto_func_group_t * and a class (dual ops, ...) the KCF_MISC_CLASS should be used. * * Context: * User context only. * * Returns: * KCF_INVALID_MECH_CLASS or KCF_INVALID_MECH_NAME if the class or * the mechname is bogus. * KCF_MECH_TAB_FULL when there is no room left in the mech. tabs. * KCF_SUCCESS otherwise. */ static int kcf_create_mech_entry(kcf_ops_class_t class, const char *mechname) { if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) return (KCF_INVALID_MECH_CLASS); if ((mechname == NULL) || (mechname[0] == 0)) return (KCF_INVALID_MECH_NAME); /* * First check if the mechanism is already in one of the tables. * The mech_entry could be in another class. */ avl_index_t where = 0; kcf_mech_entry_t tmptab; strlcpy(tmptab.me_name, mechname, CRYPTO_MAX_MECH_NAME); if (avl_find(&kcf_mech_hash, &tmptab, &where) != NULL) return (KCF_SUCCESS); /* Now take the next unused mech entry in the class's tab */ kcf_mech_entry_t *me_tab = kcf_mech_tabs_tab[class].met_tab; int size = kcf_mech_tabs_tab[class].met_size; for (int i = 0; i < size; ++i) if (me_tab[i].me_name[0] == 0) { /* Found an empty spot */ strlcpy(me_tab[i].me_name, mechname, CRYPTO_MAX_MECH_NAME); me_tab[i].me_mechid = KCF_MECHID(class, i); /* Add the new mechanism to the hash table */ avl_insert(&kcf_mech_hash, &me_tab[i], where); return (KCF_SUCCESS); } return (KCF_MECH_TAB_FULL); } /* * kcf_add_mech_provider() * * Arguments: * . An index into the provider mechanism array * . A pointer to the provider descriptor * . Storage for the kcf_prov_mech_desc_t the entry was added at. * * Description: * Adds a new provider of a mechanism to the mechanism's mech_entry * chain. * * Context: * User context only. * * Returns: * KCF_SUCCESS on success * KCF_MECH_TAB_FULL otherwise. */ int kcf_add_mech_provider(short mech_indx, kcf_provider_desc_t *prov_desc, kcf_prov_mech_desc_t **pmdpp) { int error; kcf_mech_entry_t *mech_entry = NULL; const crypto_mech_info_t *mech_info; crypto_mech_type_t kcf_mech_type; kcf_prov_mech_desc_t *prov_mech; mech_info = &prov_desc->pd_mechanisms[mech_indx]; /* * A mechanism belongs to exactly one mechanism table. * Find the class corresponding to the function group flag of * the mechanism. */ kcf_mech_type = crypto_mech2id(mech_info->cm_mech_name); if (kcf_mech_type == CRYPTO_MECH_INVALID) { crypto_func_group_t fg = mech_info->cm_func_group_mask; kcf_ops_class_t class; - if (fg & CRYPTO_FG_DIGEST || fg & CRYPTO_FG_DIGEST_ATOMIC) - class = KCF_DIGEST_CLASS; - else if (fg & CRYPTO_FG_ENCRYPT_ATOMIC || + if (fg & CRYPTO_FG_ENCRYPT_ATOMIC || fg & CRYPTO_FG_DECRYPT_ATOMIC) class = KCF_CIPHER_CLASS; else if (fg & CRYPTO_FG_MAC || fg & CRYPTO_FG_MAC_ATOMIC) class = KCF_MAC_CLASS; else __builtin_unreachable(); /* * Attempt to create a new mech_entry for the specified * mechanism. kcf_create_mech_entry() can handle the case * where such an entry already exists.
*/ if ((error = kcf_create_mech_entry(class, mech_info->cm_mech_name)) != KCF_SUCCESS) { return (error); } /* get the KCF mech type that was assigned to the mechanism */ kcf_mech_type = crypto_mech2id(mech_info->cm_mech_name); ASSERT(kcf_mech_type != CRYPTO_MECH_INVALID); } error = kcf_get_mech_entry(kcf_mech_type, &mech_entry); ASSERT(error == KCF_SUCCESS); /* allocate and initialize new kcf_prov_mech_desc */ prov_mech = kmem_zalloc(sizeof (kcf_prov_mech_desc_t), KM_SLEEP); memcpy(&prov_mech->pm_mech_info, mech_info, sizeof (crypto_mech_info_t)); prov_mech->pm_prov_desc = prov_desc; prov_desc->pd_mech_indx[KCF_MECH2CLASS(kcf_mech_type)] [KCF_MECH2INDEX(kcf_mech_type)] = mech_indx; KCF_PROV_REFHOLD(prov_desc); KCF_PROV_IREFHOLD(prov_desc); /* * Add the new kcf_prov_mech_desc at the front of the HW providers * chain. */ if (mech_entry->me_sw_prov != NULL) { /* * There is already a provider for this mechanism. * Since we allow only one provider per mechanism, * report this condition. */ cmn_err(CE_WARN, "The cryptographic provider " "\"%s\" will not be used for %s. The provider " "\"%s\" will be used for this mechanism " "instead.", prov_desc->pd_description, mech_info->cm_mech_name, mech_entry->me_sw_prov->pm_prov_desc-> pd_description); KCF_PROV_REFRELE(prov_desc); kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t)); prov_mech = NULL; } else { /* * Set the provider as the provider for * this mechanism. */ mech_entry->me_sw_prov = prov_mech; } *pmdpp = prov_mech; return (KCF_SUCCESS); } /* * kcf_remove_mech_provider() * * Arguments: * . mech_name: the name of the mechanism. * . prov_desc: The provider descriptor * * Description: * Removes a provider from the chain of provider descriptors. * The provider is made unavailable to kernel consumers for the specified * mechanism. * * Context: * User context only. */ void kcf_remove_mech_provider(const char *mech_name, kcf_provider_desc_t *prov_desc) { crypto_mech_type_t mech_type; kcf_prov_mech_desc_t *prov_mech = NULL; kcf_mech_entry_t *mech_entry; /* get the KCF mech type that was assigned to the mechanism */ if ((mech_type = crypto_mech2id(mech_name)) == CRYPTO_MECH_INVALID) { /* * Provider was not allowed for this mech due to policy or * configuration. */ return; } /* get a ptr to the mech_entry that was created */ if (kcf_get_mech_entry(mech_type, &mech_entry) != KCF_SUCCESS) { /* * Provider was not allowed for this mech due to policy or * configuration. */ return; } if (mech_entry->me_sw_prov == NULL || mech_entry->me_sw_prov->pm_prov_desc != prov_desc) { /* not the provider for this mechanism */ return; } prov_mech = mech_entry->me_sw_prov; mech_entry->me_sw_prov = NULL; /* free entry */ KCF_PROV_IREFRELE(prov_mech->pm_prov_desc); KCF_PROV_REFRELE(prov_mech->pm_prov_desc); kmem_free(prov_mech, sizeof (kcf_prov_mech_desc_t)); } /* * kcf_get_mech_entry() * * Arguments: * . The framework mechanism type * . Storage for the mechanism entry * * Description: * Retrieves the mechanism entry for the mech. * * Context: * User and interrupt contexts. * * Returns: * KCF_MECHANISM_XXX appropriate error code. * KCF_SUCCESS otherwise.
*/ int kcf_get_mech_entry(crypto_mech_type_t mech_type, kcf_mech_entry_t **mep) { kcf_ops_class_t class; int index; const kcf_mech_entry_tab_t *me_tab; ASSERT(mep != NULL); class = KCF_MECH2CLASS(mech_type); if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) { /* the caller won't need to know it's an invalid class */ return (KCF_INVALID_MECH_NUMBER); } me_tab = &kcf_mech_tabs_tab[class]; index = KCF_MECH2INDEX(mech_type); if ((index < 0) || (index >= me_tab->met_size)) { return (KCF_INVALID_MECH_NUMBER); } *mep = &((me_tab->met_tab)[index]); return (KCF_SUCCESS); } /* * crypto_mech2id() * * Arguments: * . mechname: A null-terminated string identifying the mechanism name. * * Description: * Walks the mechanism tables, looking for an entry that matches the * mechname. Once it finds it, it builds the 64-bit mech_type and returns * it. * * Context: * Process and interrupt contexts. * * Returns: * The unique mechanism identified by 'mechname', if found. * CRYPTO_MECH_INVALID otherwise. */ /* * Look up the hash table for an entry that matches the mechname. * If there are no providers for the mechanism, * but there is an unloaded provider, this routine will attempt * to load it. */ crypto_mech_type_t crypto_mech2id(const char *mechname) { kcf_mech_entry_t tmptab, *found; strlcpy(tmptab.me_name, mechname, CRYPTO_MAX_MECH_NAME); if ((found = avl_find(&kcf_mech_hash, &tmptab, NULL))) { ASSERT(found->me_mechid != CRYPTO_MECH_INVALID); return (found->me_mechid); } return (CRYPTO_MECH_INVALID); } #if defined(_KERNEL) EXPORT_SYMBOL(crypto_mech2id); #endif
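To make the 64-bit mech-id encoding used by kcf_get_mech_entry() and crypto_mech2id() concrete, a small standalone sketch (the macros are copied from the KCF_MECHID family in impl.h in the next hunk; the literal 2 for KCF_MAC_CLASS and the index 5 are illustrative values):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t crypto_mech_type_t;

/* Same packing as KCF_MECHID/KCF_MECH2CLASS/KCF_MECH2INDEX in impl.h. */
#define	KCF_MECHID(class, index)	\
	(((crypto_mech_type_t)(class) << 32) | (crypto_mech_type_t)(index))
#define	KCF_MECH2CLASS(mech_type)	((int)((mech_type) >> 32))
#define	KCF_MECH2INDEX(mech_type)	((int)((mech_type) & 0xFFFFFFFF))

int
main(void)
{
	/* KCF_MAC_CLASS == 2 after this change; slot 5 of that class's table. */
	crypto_mech_type_t id = KCF_MECHID(2, 5);

	/* Prints "class 2, index 5": the upper 32 bits select the per-class
	 * table, the lower 32 bits index into it. */
	printf("class %d, index %d\n",
	    KCF_MECH2CLASS(id), KCF_MECH2INDEX(id));
	return (0);
}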
diff --git a/module/icp/include/sys/crypto/impl.h b/module/icp/include/sys/crypto/impl.h index f6b2e519f0a2..0f5ef58ac009 100644 --- a/module/icp/include/sys/crypto/impl.h +++ b/module/icp/include/sys/crypto/impl.h @@ -1,353 +1,352 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2009 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ #ifndef _SYS_CRYPTO_IMPL_H #define _SYS_CRYPTO_IMPL_H /* * Kernel Cryptographic Framework private implementation definitions. */ #include #include #include #include #include #ifdef __cplusplus extern "C" { #endif /* * Prefixes convention: structures internal to the kernel cryptographic * framework start with 'kcf_'. Exposed structures start with 'crypto_'. */ /* * The following two macros should be * #define KCF_OPS_CLASSSIZE (KCF_LAST_OPSCLASS - KCF_FIRST_OPSCLASS + 2) * #define KCF_MAXMECHTAB KCF_MAXCIPHER * * However, doing that would involve reorganizing the header file a bit. * When impl.h is broken up (bug# 4703218), this will be done. For now, * we hardcode these values. */ -#define KCF_OPS_CLASSSIZE 4 +#define KCF_OPS_CLASSSIZE 3 #define KCF_MAXMECHTAB 32 /* * Valid values for the state of a provider. The order of * the elements is important. * * Routines which get a provider or the list of providers * should pick only those that are in KCF_PROV_READY state. */ typedef enum { KCF_PROV_ALLOCATED = 1, /* * state < KCF_PROV_READY means the provider cannot * be used at all. */ KCF_PROV_READY, /* * state > KCF_PROV_READY means the provider cannot * be used for new requests. */ KCF_PROV_FAILED, /* * Threads setting the following two states should do so only * if the current state < KCF_PROV_DISABLED. */ KCF_PROV_DISABLED, KCF_PROV_REMOVED, KCF_PROV_FREED } kcf_prov_state_t; #define KCF_IS_PROV_USABLE(pd) ((pd)->pd_state == KCF_PROV_READY) #define KCF_IS_PROV_REMOVED(pd) ((pd)->pd_state >= KCF_PROV_REMOVED) /* * A provider descriptor structure. There is one such structure per * provider. It is allocated and initialized at registration time and * freed when the provider unregisters. * * pd_refcnt: Reference counter to this provider descriptor * pd_irefcnt: References held by the framework internal structs * pd_lock: lock protects pd_state * pd_state: State value of the provider * pd_ops_vector: The ops vector specified by the provider * pd_mech_indx: Lookup table which maps a core framework mechanism * number to an index in pd_mechanisms array * pd_mechanisms: Array of mechanisms supported by the provider, specified * by the provider during registration * pd_mech_list_count: The number of entries in pi_mechanisms, specified * by the provider during registration * pd_remove_cv: cv to wait on while the provider queue drains * pd_description: Provider description string * pd_kcf_prov_handle: KCF-private handle assigned by KCF * pd_prov_id: Identification # assigned by KCF to provider */ typedef struct kcf_provider_desc { uint_t pd_refcnt; uint_t pd_irefcnt; kmutex_t pd_lock; kcf_prov_state_t pd_state; const crypto_ops_t *pd_ops_vector; ushort_t pd_mech_indx[KCF_OPS_CLASSSIZE]\ [KCF_MAXMECHTAB]; const crypto_mech_info_t *pd_mechanisms; uint_t pd_mech_list_count; kcondvar_t pd_remove_cv; const char *pd_description; crypto_kcf_provider_handle_t pd_kcf_prov_handle; crypto_provider_id_t pd_prov_id; } kcf_provider_desc_t; /* * If a component has a reference to a kcf_provider_desc_t, * it REFHOLD()s. A new provider descriptor which is referenced only * by the providers table has a reference counter of one. */ #define KCF_PROV_REFHOLD(desc) { \ int newval = atomic_add_32_nv(&(desc)->pd_refcnt, 1); \ ASSERT(newval != 0); \ } #define KCF_PROV_IREFHOLD(desc) { \ int newval = atomic_add_32_nv(&(desc)->pd_irefcnt, 1); \ ASSERT(newval != 0); \ } #define KCF_PROV_IREFRELE(desc) { \ membar_producer(); \ int newval = atomic_add_32_nv(&(desc)->pd_irefcnt, -1); \ ASSERT(newval != -1); \ if (newval == 0) { \ cv_broadcast(&(desc)->pd_remove_cv); \ } \ } #define KCF_PROV_REFHELD(desc) ((desc)->pd_refcnt >= 1) #define KCF_PROV_REFRELE(desc) { \ membar_producer(); \ int newval = atomic_add_32_nv(&(desc)->pd_refcnt, -1); \ ASSERT(newval != -1); \ if (newval == 0) { \ kcf_provider_zero_refcnt((desc)); \ } \ } /* * An element in a mechanism provider descriptors chain. * The kcf_prov_mech_desc_t is duplicated in every chain the provider belongs * to. This is a small tradeoff of memory vs. mutex spinning time to access * the common provider field. */ typedef struct kcf_prov_mech_desc { struct kcf_mech_entry *pm_me; /* Back to the head */ struct kcf_prov_mech_desc *pm_next; /* Next in the chain */ crypto_mech_info_t pm_mech_info; /* Provider mech info */ kcf_provider_desc_t *pm_prov_desc; /* Common desc.
*/ } kcf_prov_mech_desc_t; /* * A mechanism entry in an xxx_mech_tab[]. me_pad was deemed * to be unnecessary and removed. */ typedef struct kcf_mech_entry { crypto_mech_name_t me_name; /* mechanism name */ crypto_mech_type_t me_mechid; /* Internal id for mechanism */ kcf_prov_mech_desc_t *me_sw_prov; /* provider */ avl_node_t me_node; } kcf_mech_entry_t; /* * Global tables. The sizes are from the predefined PKCS#11 v2.20 mechanisms, * with a margin of a few extra empty entries */ #define KCF_MAXDIGEST 16 /* Digests */ #define KCF_MAXCIPHER 32 /* Ciphers */ #define KCF_MAXMAC 40 /* Message authentication codes */ _Static_assert(KCF_MAXCIPHER == KCF_MAXMECHTAB, "KCF_MAXCIPHER != KCF_MAXMECHTAB"); /* See KCF_MAXMECHTAB comment */ typedef enum { - KCF_DIGEST_CLASS = 1, - KCF_CIPHER_CLASS, + KCF_CIPHER_CLASS = 1, KCF_MAC_CLASS, } kcf_ops_class_t; -#define KCF_FIRST_OPSCLASS KCF_DIGEST_CLASS +#define KCF_FIRST_OPSCLASS KCF_CIPHER_CLASS #define KCF_LAST_OPSCLASS KCF_MAC_CLASS _Static_assert( KCF_OPS_CLASSSIZE == (KCF_LAST_OPSCLASS - KCF_FIRST_OPSCLASS + 2), "KCF_OPS_CLASSSIZE doesn't match kcf_ops_class_t!"); /* The table of all the kcf_xxx_mech_tab[]s, indexed by kcf_ops_class */ typedef struct kcf_mech_entry_tab { int met_size; /* Size of the met_tab[] */ kcf_mech_entry_t *met_tab; /* the table */ } kcf_mech_entry_tab_t; extern const kcf_mech_entry_tab_t kcf_mech_tabs_tab[]; #define KCF_MECHID(class, index) \ (((crypto_mech_type_t)(class) << 32) | (crypto_mech_type_t)(index)) #define KCF_MECH2CLASS(mech_type) ((kcf_ops_class_t)((mech_type) >> 32)) #define KCF_MECH2INDEX(mech_type) ((int)((mech_type) & 0xFFFFFFFF)) #define KCF_TO_PROV_MECH_INDX(pd, mech_type) \ ((pd)->pd_mech_indx[KCF_MECH2CLASS(mech_type)] \ [KCF_MECH2INDEX(mech_type)]) #define KCF_TO_PROV_MECHINFO(pd, mech_type) \ ((pd)->pd_mechanisms[KCF_TO_PROV_MECH_INDX(pd, mech_type)]) #define KCF_TO_PROV_MECHNUM(pd, mech_type) \ (KCF_TO_PROV_MECHINFO(pd, mech_type).cm_mech_number) /* * Return codes for internal functions */ #define KCF_SUCCESS 0x0 /* Successful call */ #define KCF_INVALID_MECH_NUMBER 0x1 /* invalid mechanism number */ #define KCF_INVALID_MECH_NAME 0x2 /* invalid mechanism name */ #define KCF_INVALID_MECH_CLASS 0x3 /* invalid mechanism class */ #define KCF_MECH_TAB_FULL 0x4 /* Need more room in the mech tabs. */ #define KCF_INVALID_INDX ((ushort_t)-1) /* * Wrappers for ops vectors. In the wrapper definitions below, the pd * argument always corresponds to a pointer to a provider descriptor * of type kcf_provider_desc_t. */ #define KCF_PROV_CIPHER_OPS(pd) ((pd)->pd_ops_vector->co_cipher_ops) #define KCF_PROV_MAC_OPS(pd) ((pd)->pd_ops_vector->co_mac_ops) #define KCF_PROV_CTX_OPS(pd) ((pd)->pd_ops_vector->co_ctx_ops) /* * Wrappers for crypto_cipher_ops(9S) entry points. */ #define KCF_PROV_ENCRYPT_ATOMIC(pd, mech, key, plaintext, ciphertext, \ template) ( \ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->encrypt_atomic) ? \ KCF_PROV_CIPHER_OPS(pd)->encrypt_atomic( \ mech, key, plaintext, ciphertext, template) : \ CRYPTO_NOT_SUPPORTED) #define KCF_PROV_DECRYPT_ATOMIC(pd, mech, key, ciphertext, plaintext, \ template) ( \ (KCF_PROV_CIPHER_OPS(pd) && KCF_PROV_CIPHER_OPS(pd)->decrypt_atomic) ? \ KCF_PROV_CIPHER_OPS(pd)->decrypt_atomic( \ mech, key, ciphertext, plaintext, template) : \ CRYPTO_NOT_SUPPORTED) /* * Wrappers for crypto_mac_ops(9S) entry points. */ #define KCF_PROV_MAC_INIT(pd, ctx, mech, key, template) ( \ (KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_init) ?
\ KCF_PROV_MAC_OPS(pd)->mac_init(ctx, mech, key, template) \ : CRYPTO_NOT_SUPPORTED) /* * The _ (underscore) in _mac is needed to avoid replacing the * function mac(). */ #define KCF_PROV_MAC_UPDATE(pd, ctx, data) ( \ (KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_update) ? \ KCF_PROV_MAC_OPS(pd)->mac_update(ctx, data) : \ CRYPTO_NOT_SUPPORTED) #define KCF_PROV_MAC_FINAL(pd, ctx, mac) ( \ (KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_final) ? \ KCF_PROV_MAC_OPS(pd)->mac_final(ctx, mac) : \ CRYPTO_NOT_SUPPORTED) #define KCF_PROV_MAC_ATOMIC(pd, mech, key, data, mac, template) ( \ (KCF_PROV_MAC_OPS(pd) && KCF_PROV_MAC_OPS(pd)->mac_atomic) ? \ KCF_PROV_MAC_OPS(pd)->mac_atomic( \ mech, key, data, mac, template) : \ CRYPTO_NOT_SUPPORTED) /* * Wrappers for crypto_ctx_ops(9S) entry points. */ #define KCF_PROV_CREATE_CTX_TEMPLATE(pd, mech, key, template, size) ( \ (KCF_PROV_CTX_OPS(pd) && KCF_PROV_CTX_OPS(pd)->create_ctx_template) ? \ KCF_PROV_CTX_OPS(pd)->create_ctx_template( \ mech, key, template, size) : \ CRYPTO_NOT_SUPPORTED) #define KCF_PROV_FREE_CONTEXT(pd, ctx) ( \ (KCF_PROV_CTX_OPS(pd) && KCF_PROV_CTX_OPS(pd)->free_context) ? \ KCF_PROV_CTX_OPS(pd)->free_context(ctx) : CRYPTO_NOT_SUPPORTED) /* Miscellaneous */ extern void kcf_destroy_mech_tabs(void); extern void kcf_init_mech_tabs(void); extern int kcf_add_mech_provider(short, kcf_provider_desc_t *, kcf_prov_mech_desc_t **); extern void kcf_remove_mech_provider(const char *, kcf_provider_desc_t *); extern int kcf_get_mech_entry(crypto_mech_type_t, kcf_mech_entry_t **); extern kcf_provider_desc_t *kcf_alloc_provider_desc(void); extern void kcf_provider_zero_refcnt(kcf_provider_desc_t *); extern void kcf_free_provider_desc(kcf_provider_desc_t *); extern void undo_register_provider(kcf_provider_desc_t *, boolean_t); extern int crypto_put_output_data(uchar_t *, crypto_data_t *, int); extern int crypto_update_iov(void *, crypto_data_t *, crypto_data_t *, int (*cipher)(void *, caddr_t, size_t, crypto_data_t *)); extern int crypto_update_uio(void *, crypto_data_t *, crypto_data_t *, int (*cipher)(void *, caddr_t, size_t, crypto_data_t *)); /* Access to the provider's table */ extern void kcf_prov_tab_destroy(void); extern void kcf_prov_tab_init(void); extern int kcf_prov_tab_add_provider(kcf_provider_desc_t *); extern int kcf_prov_tab_rem_provider(crypto_provider_id_t); extern kcf_provider_desc_t *kcf_prov_tab_lookup(crypto_provider_id_t); extern int kcf_get_sw_prov(crypto_mech_type_t, kcf_provider_desc_t **, kcf_mech_entry_t **, boolean_t); #ifdef __cplusplus } #endif #endif /* _SYS_CRYPTO_IMPL_H */ diff --git a/module/icp/include/sys/crypto/spi.h b/module/icp/include/sys/crypto/spi.h index 9bcb62ac5290..e9be7e0c54d8 100644 --- a/module/icp/include/sys/crypto/spi.h +++ b/module/icp/include/sys/crypto/spi.h @@ -1,217 +1,198 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
* If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2008 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ #ifndef _SYS_CRYPTO_SPI_H #define _SYS_CRYPTO_SPI_H /* * CSPI: Cryptographic Service Provider Interface. */ #include #include #ifdef __cplusplus extern "C" { #endif #ifdef CONSTIFY_PLUGIN #define __no_const __attribute__((no_const)) #else #define __no_const #endif /* CONSTIFY_PLUGIN */ /* * Context templates can be used by providers to pre-process * keying material, such as key schedules. They are allocated by * a provider create_ctx_template(9E) entry point, and passed * as arguments to initialization and atomic provider entry points. */ typedef void *crypto_spi_ctx_template_t; /* * The context structure is passed from the kernel to a provider. * It contains the information needed to process a multi-part or * single part operation. The context structure is not used * by atomic operations. * * Parameters needed to perform a cryptographic operation, such * as keys, mechanisms, input and output buffers, are passed * as separate arguments to Provider routines. */ typedef struct crypto_ctx { void *cc_provider_private; /* owned by provider */ void *cc_framework_private; /* owned by framework */ } crypto_ctx_t; -/* - * The crypto_digest_ops structure contains pointers to digest - * operations for cryptographic providers. It is passed through - * the crypto_ops(9S) structure when providers register with the - * kernel using crypto_register_provider(9F). - */ -typedef struct crypto_digest_ops { - int (*digest_init)(crypto_ctx_t *, crypto_mechanism_t *); - int (*digest)(crypto_ctx_t *, crypto_data_t *, crypto_data_t *); - int (*digest_update)(crypto_ctx_t *, crypto_data_t *); - int (*digest_key)(crypto_ctx_t *, crypto_key_t *); - int (*digest_final)(crypto_ctx_t *, crypto_data_t *); - int (*digest_atomic)(crypto_mechanism_t *, crypto_data_t *, - crypto_data_t *); -} __no_const crypto_digest_ops_t; - /* * The crypto_cipher_ops structure contains pointers to encryption * and decryption operations for cryptographic providers. It is * passed through the crypto_ops(9S) structure when providers register * with the kernel using crypto_register_provider(9F). */ typedef struct crypto_cipher_ops { int (*encrypt_atomic)(crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t); int (*decrypt_atomic)(crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t); } __no_const crypto_cipher_ops_t; /* * The crypto_mac_ops structure contains pointers to MAC * operations for cryptographic providers. It is passed through * the crypto_ops(9S) structure when providers register with the * kernel using crypto_register_provider(9F).
*/ typedef struct crypto_mac_ops { int (*mac_init)(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t); int (*mac)(crypto_ctx_t *, crypto_data_t *, crypto_data_t *); int (*mac_update)(crypto_ctx_t *, crypto_data_t *); int (*mac_final)(crypto_ctx_t *, crypto_data_t *); int (*mac_atomic)(crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t); int (*mac_verify_atomic)(crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t); } __no_const crypto_mac_ops_t; /* * The crypto_ctx_ops structure contains pointers to context and context * template management operations for cryptographic providers. It is * passed through the crypto_ops(9S) structure when providers register * with the kernel using crypto_register_provider(9F). */ typedef struct crypto_ctx_ops { int (*create_ctx_template)(crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *, size_t *); int (*free_context)(crypto_ctx_t *); } __no_const crypto_ctx_ops_t; /* * The crypto_ops(9S) structure contains the structures containing * the pointers to functions implemented by cryptographic providers. * It is specified as part of the crypto_provider_info(9S) * supplied by a provider when it registers with the kernel * by calling crypto_register_provider(9F). */ typedef struct crypto_ops { - const crypto_digest_ops_t *co_digest_ops; const crypto_cipher_ops_t *co_cipher_ops; const crypto_mac_ops_t *co_mac_ops; const crypto_ctx_ops_t *co_ctx_ops; } crypto_ops_t; /* * The mechanism info structure crypto_mech_info_t contains a function group * bit mask cm_func_group_mask. This field, of type crypto_func_group_t, * specifies the provider entry points that can be used for a particular * mechanism. The function group mask is a combination of the following values. */ typedef uint32_t crypto_func_group_t; -#define CRYPTO_FG_DIGEST 0x00000004 /* digest_init() */ #define CRYPTO_FG_MAC 0x00001000 /* mac_init() */ #define CRYPTO_FG_ENCRYPT_ATOMIC 0x00008000 /* encrypt_atomic() */ #define CRYPTO_FG_DECRYPT_ATOMIC 0x00010000 /* decrypt_atomic() */ #define CRYPTO_FG_MAC_ATOMIC 0x00020000 /* mac_atomic() */ -#define CRYPTO_FG_DIGEST_ATOMIC 0x00040000 /* digest_atomic() */ /* * Maximum length of the pi_provider_description field of the * crypto_provider_info structure. */ #define CRYPTO_PROVIDER_DESCR_MAX_LEN 64 /* * The crypto_mech_info structure specifies one of the mechanisms * supported by a cryptographic provider. The pi_mechanisms field of * the crypto_provider_info structure contains a pointer to an array * of crypto_mech_info's. */ typedef struct crypto_mech_info { crypto_mech_name_t cm_mech_name; crypto_mech_type_t cm_mech_number; crypto_func_group_t cm_func_group_mask; } crypto_mech_info_t; /* * crypto_kcf_provider_handle_t is a handle allocated by the kernel. * It is returned after the provider registers with * crypto_register_provider(), and must be specified by the provider * when calling crypto_unregister_provider() and * crypto_provider_notification(). */ typedef uint_t crypto_kcf_provider_handle_t; /* * Provider information. Passed as argument to crypto_register_provider(9F). * Describes the provider and its capabilities. */ typedef struct crypto_provider_info { const char *pi_provider_description; const crypto_ops_t *pi_ops_vector; uint_t pi_mech_list_count; const crypto_mech_info_t *pi_mechanisms; } crypto_provider_info_t; /* * Functions exported by Solaris to cryptographic providers.
Providers * call these functions to register and unregister, notify the kernel * of state changes, and notify the kernel when an asynchronous request * completes. */ extern int crypto_register_provider(const crypto_provider_info_t *, crypto_kcf_provider_handle_t *); extern int crypto_unregister_provider(crypto_kcf_provider_handle_t); #ifdef __cplusplus } #endif #endif /* _SYS_CRYPTO_SPI_H */ diff --git a/module/icp/io/aes.c b/module/icp/io/aes.c index a68a878b6ab2..8ee2d036c1e0 100644 --- a/module/icp/io/aes.c +++ b/module/icp/io/aes.c @@ -1,498 +1,497 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. */ /* * AES provider for the Kernel Cryptographic Framework (KCF) */ #include #include #include #include #include #include #define _AES_IMPL #include #include /* * Mechanism info structure passed to KCF during registration. */ static const crypto_mech_info_t aes_mech_info_tab[] = { /* AES_CCM */ {SUN_CKM_AES_CCM, AES_CCM_MECH_INFO_TYPE, CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC}, /* AES_GCM */ {SUN_CKM_AES_GCM, AES_GCM_MECH_INFO_TYPE, CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC}, }; static int aes_common_init_ctx(aes_ctx_t *, crypto_spi_ctx_template_t *, crypto_mechanism_t *, crypto_key_t *, int, boolean_t); static int aes_encrypt_atomic(crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t); static int aes_decrypt_atomic(crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t); static const crypto_cipher_ops_t aes_cipher_ops = { .encrypt_atomic = aes_encrypt_atomic, .decrypt_atomic = aes_decrypt_atomic }; static int aes_create_ctx_template(crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *, size_t *); static int aes_free_context(crypto_ctx_t *); static const crypto_ctx_ops_t aes_ctx_ops = { .create_ctx_template = aes_create_ctx_template, .free_context = aes_free_context }; static const crypto_ops_t aes_crypto_ops = { - NULL, &aes_cipher_ops, NULL, &aes_ctx_ops, }; static const crypto_provider_info_t aes_prov_info = { "AES Software Provider", &aes_crypto_ops, sizeof (aes_mech_info_tab) / sizeof (crypto_mech_info_t), aes_mech_info_tab }; static crypto_kcf_provider_handle_t aes_prov_handle = 0; int aes_mod_init(void) { /* Determine the fastest available implementation. */ aes_impl_init(); gcm_impl_init(); /* Register with KCF. If the registration fails, remove the module.
*/ if (crypto_register_provider(&aes_prov_info, &aes_prov_handle)) return (EACCES); return (0); } int aes_mod_fini(void) { /* Unregister from KCF if module is registered */ if (aes_prov_handle != 0) { if (crypto_unregister_provider(aes_prov_handle)) return (EBUSY); aes_prov_handle = 0; } return (0); } static int aes_check_mech_param(crypto_mechanism_t *mechanism, aes_ctx_t **ctx) { void *p = NULL; boolean_t param_required = B_TRUE; size_t param_len; void *(*alloc_fun)(int); int rv = CRYPTO_SUCCESS; switch (mechanism->cm_type) { case AES_CCM_MECH_INFO_TYPE: param_len = sizeof (CK_AES_CCM_PARAMS); alloc_fun = ccm_alloc_ctx; break; case AES_GCM_MECH_INFO_TYPE: param_len = sizeof (CK_AES_GCM_PARAMS); alloc_fun = gcm_alloc_ctx; break; default: __builtin_unreachable(); } if (param_required && mechanism->cm_param != NULL && mechanism->cm_param_len != param_len) { rv = CRYPTO_MECHANISM_PARAM_INVALID; } if (ctx != NULL) { p = (alloc_fun)(KM_SLEEP); *ctx = p; } return (rv); } /* * Initialize key schedules for AES */ static int init_keysched(crypto_key_t *key, void *newbie) { if (key->ck_length < AES_MINBITS || key->ck_length > AES_MAXBITS) { return (CRYPTO_KEY_SIZE_RANGE); } /* key length must be either 128, 192, or 256 */ if ((key->ck_length & 63) != 0) return (CRYPTO_KEY_SIZE_RANGE); aes_init_keysched(key->ck_data, key->ck_length, newbie); return (CRYPTO_SUCCESS); } /* * KCF software provider encrypt entry points. */ static int aes_encrypt_atomic(crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext, crypto_spi_ctx_template_t template) { aes_ctx_t aes_ctx; off_t saved_offset; size_t saved_length; size_t length_needed; int ret; memset(&aes_ctx, 0, sizeof (aes_ctx_t)); ASSERT(ciphertext != NULL); if ((ret = aes_check_mech_param(mechanism, NULL)) != CRYPTO_SUCCESS) return (ret); ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key, KM_SLEEP, B_TRUE); if (ret != CRYPTO_SUCCESS) return (ret); switch (mechanism->cm_type) { case AES_CCM_MECH_INFO_TYPE: length_needed = plaintext->cd_length + aes_ctx.ac_mac_len; break; case AES_GCM_MECH_INFO_TYPE: length_needed = plaintext->cd_length + aes_ctx.ac_tag_len; break; default: __builtin_unreachable(); } /* return size of buffer needed to store output */ if (ciphertext->cd_length < length_needed) { ciphertext->cd_length = length_needed; ret = CRYPTO_BUFFER_TOO_SMALL; goto out; } saved_offset = ciphertext->cd_offset; saved_length = ciphertext->cd_length; /* * Do an update on the specified input data. 
*/ switch (plaintext->cd_format) { case CRYPTO_DATA_RAW: ret = crypto_update_iov(&aes_ctx, plaintext, ciphertext, aes_encrypt_contiguous_blocks); break; case CRYPTO_DATA_UIO: ret = crypto_update_uio(&aes_ctx, plaintext, ciphertext, aes_encrypt_contiguous_blocks); break; default: ret = CRYPTO_ARGUMENTS_BAD; } if (ret == CRYPTO_SUCCESS) { if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) { ret = ccm_encrypt_final((ccm_ctx_t *)&aes_ctx, ciphertext, AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block); if (ret != CRYPTO_SUCCESS) goto out; ASSERT(aes_ctx.ac_remainder_len == 0); } else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) { ret = gcm_encrypt_final((gcm_ctx_t *)&aes_ctx, ciphertext, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, aes_xor_block); if (ret != CRYPTO_SUCCESS) goto out; ASSERT(aes_ctx.ac_remainder_len == 0); } else { ASSERT(aes_ctx.ac_remainder_len == 0); } if (plaintext != ciphertext) { ciphertext->cd_length = ciphertext->cd_offset - saved_offset; } } else { ciphertext->cd_length = saved_length; } ciphertext->cd_offset = saved_offset; out: if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) { memset(aes_ctx.ac_keysched, 0, aes_ctx.ac_keysched_len); kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len); } if (aes_ctx.ac_flags & GCM_MODE) { gcm_clear_ctx((gcm_ctx_t *)&aes_ctx); } return (ret); } static int aes_decrypt_atomic(crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext, crypto_spi_ctx_template_t template) { aes_ctx_t aes_ctx; off_t saved_offset; size_t saved_length; size_t length_needed; int ret; memset(&aes_ctx, 0, sizeof (aes_ctx_t)); ASSERT(plaintext != NULL); if ((ret = aes_check_mech_param(mechanism, NULL)) != CRYPTO_SUCCESS) return (ret); ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key, KM_SLEEP, B_FALSE); if (ret != CRYPTO_SUCCESS) return (ret); switch (mechanism->cm_type) { case AES_CCM_MECH_INFO_TYPE: length_needed = aes_ctx.ac_data_len; break; case AES_GCM_MECH_INFO_TYPE: length_needed = ciphertext->cd_length - aes_ctx.ac_tag_len; break; default: __builtin_unreachable(); } /* return size of buffer needed to store output */ if (plaintext->cd_length < length_needed) { plaintext->cd_length = length_needed; ret = CRYPTO_BUFFER_TOO_SMALL; goto out; } saved_offset = plaintext->cd_offset; saved_length = plaintext->cd_length; /* * Do an update on the specified input data. 
*/ switch (ciphertext->cd_format) { case CRYPTO_DATA_RAW: ret = crypto_update_iov(&aes_ctx, ciphertext, plaintext, aes_decrypt_contiguous_blocks); break; case CRYPTO_DATA_UIO: ret = crypto_update_uio(&aes_ctx, ciphertext, plaintext, aes_decrypt_contiguous_blocks); break; default: ret = CRYPTO_ARGUMENTS_BAD; } if (ret == CRYPTO_SUCCESS) { if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) { ASSERT(aes_ctx.ac_processed_data_len == aes_ctx.ac_data_len); ASSERT(aes_ctx.ac_processed_mac_len == aes_ctx.ac_mac_len); ret = ccm_decrypt_final((ccm_ctx_t *)&aes_ctx, plaintext, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, aes_xor_block); ASSERT(aes_ctx.ac_remainder_len == 0); if ((ret == CRYPTO_SUCCESS) && (ciphertext != plaintext)) { plaintext->cd_length = plaintext->cd_offset - saved_offset; } else { plaintext->cd_length = saved_length; } } else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) { ret = gcm_decrypt_final((gcm_ctx_t *)&aes_ctx, plaintext, AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block); ASSERT(aes_ctx.ac_remainder_len == 0); if ((ret == CRYPTO_SUCCESS) && (ciphertext != plaintext)) { plaintext->cd_length = plaintext->cd_offset - saved_offset; } else { plaintext->cd_length = saved_length; } } else __builtin_unreachable(); } else { plaintext->cd_length = saved_length; } plaintext->cd_offset = saved_offset; out: if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) { memset(aes_ctx.ac_keysched, 0, aes_ctx.ac_keysched_len); kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len); } if (aes_ctx.ac_flags & CCM_MODE) { if (aes_ctx.ac_pt_buf != NULL) { vmem_free(aes_ctx.ac_pt_buf, aes_ctx.ac_data_len); } } else if (aes_ctx.ac_flags & GCM_MODE) { gcm_clear_ctx((gcm_ctx_t *)&aes_ctx); } return (ret); } /* * KCF software provider context template entry points. */ static int aes_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_spi_ctx_template_t *tmpl, size_t *tmpl_size) { void *keysched; size_t size; int rv; if (mechanism->cm_type != AES_CCM_MECH_INFO_TYPE && mechanism->cm_type != AES_GCM_MECH_INFO_TYPE) return (CRYPTO_MECHANISM_INVALID); if ((keysched = aes_alloc_keysched(&size, KM_SLEEP)) == NULL) { return (CRYPTO_HOST_MEMORY); } /* * Initialize key schedule. Key length information is stored * in the key. */ if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) { memset(keysched, 0, size); kmem_free(keysched, size); return (rv); } *tmpl = keysched; *tmpl_size = size; return (CRYPTO_SUCCESS); } static int aes_free_context(crypto_ctx_t *ctx) { aes_ctx_t *aes_ctx = ctx->cc_provider_private; if (aes_ctx != NULL) { if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) { ASSERT(aes_ctx->ac_keysched_len != 0); memset(aes_ctx->ac_keysched, 0, aes_ctx->ac_keysched_len); kmem_free(aes_ctx->ac_keysched, aes_ctx->ac_keysched_len); } crypto_free_mode_ctx(aes_ctx); ctx->cc_provider_private = NULL; } return (CRYPTO_SUCCESS); } static int aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template, crypto_mechanism_t *mechanism, crypto_key_t *key, int kmflag, boolean_t is_encrypt_init) { int rv = CRYPTO_SUCCESS; void *keysched; size_t size = 0; if (template == NULL) { if ((keysched = aes_alloc_keysched(&size, kmflag)) == NULL) return (CRYPTO_HOST_MEMORY); /* * Initialize key schedule. * Key length is stored in the key. 
*/ if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) { kmem_free(keysched, size); return (rv); } aes_ctx->ac_flags |= PROVIDER_OWNS_KEY_SCHEDULE; aes_ctx->ac_keysched_len = size; } else { keysched = template; } aes_ctx->ac_keysched = keysched; switch (mechanism->cm_type) { case AES_CCM_MECH_INFO_TYPE: if (mechanism->cm_param == NULL || mechanism->cm_param_len != sizeof (CK_AES_CCM_PARAMS)) { return (CRYPTO_MECHANISM_PARAM_INVALID); } rv = ccm_init_ctx((ccm_ctx_t *)aes_ctx, mechanism->cm_param, kmflag, is_encrypt_init, AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block); break; case AES_GCM_MECH_INFO_TYPE: if (mechanism->cm_param == NULL || mechanism->cm_param_len != sizeof (CK_AES_GCM_PARAMS)) { return (CRYPTO_MECHANISM_PARAM_INVALID); } rv = gcm_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, aes_xor_block); break; } if (rv != CRYPTO_SUCCESS) { if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) { memset(keysched, 0, size); kmem_free(keysched, size); } } return (rv); } diff --git a/module/icp/io/sha2_mod.c b/module/icp/io/sha2_mod.c index f068951b07f5..c8e3b4fccdd1 100644 --- a/module/icp/io/sha2_mod.c +++ b/module/icp/io/sha2_mod.c @@ -1,1292 +1,1012 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2010 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ #include #include #include #include #include #include /* * Macros to access the SHA2 or SHA2-HMAC contexts from a context passed * by KCF to one of the entry points. */ #define PROV_SHA2_CTX(ctx) ((sha2_ctx_t *)(ctx)->cc_provider_private) #define PROV_SHA2_HMAC_CTX(ctx) ((sha2_hmac_ctx_t *)(ctx)->cc_provider_private) /* to extract the digest length passed as mechanism parameter */ #define PROV_SHA2_GET_DIGEST_LEN(m, len) { \ if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t))) \ (len) = (uint32_t)*((ulong_t *)(m)->cm_param); \ else { \ ulong_t tmp_ulong; \ memcpy(&tmp_ulong, (m)->cm_param, sizeof (ulong_t)); \ (len) = (uint32_t)tmp_ulong; \ } \ } #define PROV_SHA2_DIGEST_KEY(mech, ctx, key, len, digest) { \ SHA2Init(mech, ctx); \ SHA2Update(ctx, key, len); \ SHA2Final(digest, ctx); \ }
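The memcpy branch of PROV_SHA2_GET_DIGEST_LEN above exists because cm_param need not be ulong_t-aligned. A minimal userland sketch of the same alignment-safe read (a stand-in for illustration, not the kernel macro itself; the names and the value 20 are hypothetical):

#include <stdio.h>
#include <string.h>

/* Alignment-safe read of a ulong_t-sized parameter, as the macro's else-branch does. */
static unsigned int
get_digest_len(const void *param)
{
	unsigned long tmp;

	memcpy(&tmp, param, sizeof (tmp));	/* safe for any alignment */
	return ((unsigned int)tmp);
}

int
main(void)
{
	unsigned char buf[sizeof (unsigned long) + 1];
	unsigned long want = 20;	/* e.g. a truncated HMAC length */

	memcpy(buf + 1, &want, sizeof (want));	/* deliberately misaligned */
	printf("digest_len = %u\n", get_digest_len(buf + 1));
	return (0);
}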
/* * Mechanism info structure passed to KCF during registration. */ static const crypto_mech_info_t sha2_mech_info_tab[] = { /* SHA256 */ - {SUN_CKM_SHA256, SHA256_MECH_INFO_TYPE, - CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC}, + {SUN_CKM_SHA256, SHA256_MECH_INFO_TYPE, 0}, /* SHA256-HMAC */ {SUN_CKM_SHA256_HMAC, SHA256_HMAC_MECH_INFO_TYPE, CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC}, /* SHA256-HMAC GENERAL */ {SUN_CKM_SHA256_HMAC_GENERAL, SHA256_HMAC_GEN_MECH_INFO_TYPE, CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC}, /* SHA384 */ - {SUN_CKM_SHA384, SHA384_MECH_INFO_TYPE, - CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC}, + {SUN_CKM_SHA384, SHA384_MECH_INFO_TYPE, 0}, /* SHA384-HMAC */ {SUN_CKM_SHA384_HMAC, SHA384_HMAC_MECH_INFO_TYPE, CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC}, /* SHA384-HMAC GENERAL */ {SUN_CKM_SHA384_HMAC_GENERAL, SHA384_HMAC_GEN_MECH_INFO_TYPE, CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC}, /* SHA512 */ - {SUN_CKM_SHA512, SHA512_MECH_INFO_TYPE, - CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC}, + {SUN_CKM_SHA512, SHA512_MECH_INFO_TYPE, 0}, /* SHA512-HMAC */ {SUN_CKM_SHA512_HMAC, SHA512_HMAC_MECH_INFO_TYPE, CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC}, /* SHA512-HMAC GENERAL */ {SUN_CKM_SHA512_HMAC_GENERAL, SHA512_HMAC_GEN_MECH_INFO_TYPE, CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC}, }; -static int sha2_digest_init(crypto_ctx_t *, crypto_mechanism_t *); -static int sha2_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *); -static int sha2_digest_update(crypto_ctx_t *, crypto_data_t *); -static int sha2_digest_final(crypto_ctx_t *, crypto_data_t *); -static int sha2_digest_atomic(crypto_mechanism_t *, crypto_data_t *, - crypto_data_t *); - -static const crypto_digest_ops_t sha2_digest_ops = { - .digest_init = sha2_digest_init, - .digest = sha2_digest, - .digest_update = sha2_digest_update, - .digest_final = sha2_digest_final, - .digest_atomic = sha2_digest_atomic -}; - static int sha2_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t); static int sha2_mac_update(crypto_ctx_t *, crypto_data_t *); static int sha2_mac_final(crypto_ctx_t *, crypto_data_t *); static int sha2_mac_atomic(crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t); static int sha2_mac_verify_atomic(crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t); static const crypto_mac_ops_t sha2_mac_ops = { .mac_init = sha2_mac_init, .mac = NULL, .mac_update = sha2_mac_update, .mac_final = sha2_mac_final, .mac_atomic = sha2_mac_atomic, .mac_verify_atomic = sha2_mac_verify_atomic }; static int sha2_create_ctx_template(crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *, size_t *); static int sha2_free_context(crypto_ctx_t *); static const crypto_ctx_ops_t sha2_ctx_ops = { .create_ctx_template = sha2_create_ctx_template, .free_context = sha2_free_context }; static const crypto_ops_t sha2_crypto_ops = { - &sha2_digest_ops, NULL, &sha2_mac_ops, &sha2_ctx_ops, }; static const crypto_provider_info_t sha2_prov_info = { "SHA2 Software Provider", &sha2_crypto_ops, sizeof (sha2_mech_info_tab) / sizeof (crypto_mech_info_t), sha2_mech_info_tab }; static crypto_kcf_provider_handle_t sha2_prov_handle = 0; int sha2_mod_init(void) { int ret; /* * Register with KCF. If the registration fails, log an * error but do not uninstall the module, since the functionality * provided by misc/sha2 should still be available.
*/ if ((ret = crypto_register_provider(&sha2_prov_info, &sha2_prov_handle)) != CRYPTO_SUCCESS) cmn_err(CE_WARN, "sha2 _init: " "crypto_register_provider() failed (0x%x)", ret); return (0); } int sha2_mod_fini(void) { int ret = 0; if (sha2_prov_handle != 0) { if ((ret = crypto_unregister_provider(sha2_prov_handle)) != CRYPTO_SUCCESS) { cmn_err(CE_WARN, "sha2 _fini: crypto_unregister_provider() " "failed (0x%x)", ret); return (EBUSY); } sha2_prov_handle = 0; } return (ret); } -/* - * KCF software provider digest entry points. - */ - -static int -sha2_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism) -{ - - /* - * Allocate and initialize SHA2 context. - */ - ctx->cc_provider_private = kmem_alloc(sizeof (sha2_ctx_t), KM_SLEEP); - if (ctx->cc_provider_private == NULL) - return (CRYPTO_HOST_MEMORY); - - PROV_SHA2_CTX(ctx)->sc_mech_type = mechanism->cm_type; - SHA2Init(mechanism->cm_type, &PROV_SHA2_CTX(ctx)->sc_sha2_ctx); - - return (CRYPTO_SUCCESS); -} - /* * Helper SHA2 digest update function for uio data. */ static int sha2_digest_update_uio(SHA2_CTX *sha2_ctx, crypto_data_t *data) { off_t offset = data->cd_offset; size_t length = data->cd_length; uint_t vec_idx = 0; size_t cur_len; /* we support only kernel buffer */ if (zfs_uio_segflg(data->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing data to be * digested. */ offset = zfs_uio_index_at_offset(data->cd_uio, offset, &vec_idx); if (vec_idx == zfs_uio_iovcnt(data->cd_uio)) { /* * The caller specified an offset that is larger than the * total size of the buffers it provided. */ return (CRYPTO_DATA_LEN_RANGE); } /* * Now do the digesting on the iovecs. */ while (vec_idx < zfs_uio_iovcnt(data->cd_uio) && length > 0) { cur_len = MIN(zfs_uio_iovlen(data->cd_uio, vec_idx) - offset, length); SHA2Update(sha2_ctx, (uint8_t *)zfs_uio_iovbase(data->cd_uio, vec_idx) + offset, cur_len); length -= cur_len; vec_idx++; offset = 0; } if (vec_idx == zfs_uio_iovcnt(data->cd_uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. * The caller requested to digest more data than it provided. */ return (CRYPTO_DATA_LEN_RANGE); } return (CRYPTO_SUCCESS); } /* * Helper SHA2 digest final function for uio data. * digest_len is the length of the desired digest. If digest_len * is smaller than the default SHA2 digest length, the caller * must pass a scratch buffer, digest_scratch, which must * be at least the algorithm's digest length bytes. */ static int sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest, ulong_t digest_len, uchar_t *digest_scratch) { off_t offset = digest->cd_offset; uint_t vec_idx = 0; /* we support only kernel buffer */ if (zfs_uio_segflg(digest->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing ptr to the digest to * be returned. */ offset = zfs_uio_index_at_offset(digest->cd_uio, offset, &vec_idx); if (vec_idx == zfs_uio_iovcnt(digest->cd_uio)) { /* * The caller specified an offset that is * larger than the total size of the buffers * it provided. */ return (CRYPTO_DATA_LEN_RANGE); } if (offset + digest_len <= zfs_uio_iovlen(digest->cd_uio, vec_idx)) { /* * The computed SHA2 digest will fit in the current * iovec. */ if (((sha2_ctx->algotype <= SHA256_HMAC_GEN_MECH_INFO_TYPE) && (digest_len != SHA256_DIGEST_LENGTH)) || ((sha2_ctx->algotype > SHA256_HMAC_GEN_MECH_INFO_TYPE) && (digest_len != SHA512_DIGEST_LENGTH))) { /* * The caller requested a short digest. 
Digest * into a scratch buffer and return to * the user only what was requested. */ SHA2Final(digest_scratch, sha2_ctx); memcpy((uchar_t *) zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset, digest_scratch, digest_len); } else { SHA2Final((uchar_t *)zfs_uio_iovbase(digest-> cd_uio, vec_idx) + offset, sha2_ctx); } } else { /* * The computed digest will be crossing one or more iovec's. * This is bad performance-wise but we need to support it. * Allocate a small scratch buffer on the stack and * copy it piecemeal to the specified digest iovec's. */ uchar_t digest_tmp[SHA512_DIGEST_LENGTH]; off_t scratch_offset = 0; size_t length = digest_len; size_t cur_len; SHA2Final(digest_tmp, sha2_ctx); while (vec_idx < zfs_uio_iovcnt(digest->cd_uio) && length > 0) { cur_len = MIN(zfs_uio_iovlen(digest->cd_uio, vec_idx) - offset, length); memcpy( zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset, digest_tmp + scratch_offset, cur_len); length -= cur_len; vec_idx++; scratch_offset += cur_len; offset = 0; } if (vec_idx == zfs_uio_iovcnt(digest->cd_uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. * The caller requested to digest more data than it * provided. */ return (CRYPTO_DATA_LEN_RANGE); } } return (CRYPTO_SUCCESS); } -static int -sha2_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest) -{ - int ret = CRYPTO_SUCCESS; - uint_t sha_digest_len; - - ASSERT(ctx->cc_provider_private != NULL); - - switch (PROV_SHA2_CTX(ctx)->sc_mech_type) { - case SHA256_MECH_INFO_TYPE: - sha_digest_len = SHA256_DIGEST_LENGTH; - break; - case SHA384_MECH_INFO_TYPE: - sha_digest_len = SHA384_DIGEST_LENGTH; - break; - case SHA512_MECH_INFO_TYPE: - sha_digest_len = SHA512_DIGEST_LENGTH; - break; - default: - return (CRYPTO_MECHANISM_INVALID); - } - - /* - * We need to just return the length needed to store the output. - * We should not destroy the context for the following cases. - */ - if ((digest->cd_length == 0) || - (digest->cd_length < sha_digest_len)) { - digest->cd_length = sha_digest_len; - return (CRYPTO_BUFFER_TOO_SMALL); - } - - /* - * Do the SHA2 update on the specified input data. - */ - switch (data->cd_format) { - case CRYPTO_DATA_RAW: - SHA2Update(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx, - (uint8_t *)data->cd_raw.iov_base + data->cd_offset, - data->cd_length); - break; - case CRYPTO_DATA_UIO: - ret = sha2_digest_update_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx, - data); - break; - default: - ret = CRYPTO_ARGUMENTS_BAD; - } - - if (ret != CRYPTO_SUCCESS) { - /* the update failed, free context and bail */ - kmem_free(ctx->cc_provider_private, sizeof (sha2_ctx_t)); - ctx->cc_provider_private = NULL; - digest->cd_length = 0; - return (ret); - } - - /* - * Do a SHA2 final, must be done separately since the digest - * type can be different than the input data type.
- */ - switch (digest->cd_format) { - case CRYPTO_DATA_RAW: - SHA2Final((unsigned char *)digest->cd_raw.iov_base + - digest->cd_offset, &PROV_SHA2_CTX(ctx)->sc_sha2_ctx); - break; - case CRYPTO_DATA_UIO: - ret = sha2_digest_final_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx, - digest, sha_digest_len, NULL); - break; - default: - ret = CRYPTO_ARGUMENTS_BAD; - } - - /* all done, free context and return */ - - if (ret == CRYPTO_SUCCESS) - digest->cd_length = sha_digest_len; - else - digest->cd_length = 0; - - kmem_free(ctx->cc_provider_private, sizeof (sha2_ctx_t)); - ctx->cc_provider_private = NULL; - return (ret); -} - -static int -sha2_digest_update(crypto_ctx_t *ctx, crypto_data_t *data) -{ - int ret = CRYPTO_SUCCESS; - - ASSERT(ctx->cc_provider_private != NULL); - - /* - * Do the SHA2 update on the specified input data. - */ - switch (data->cd_format) { - case CRYPTO_DATA_RAW: - SHA2Update(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx, - (uint8_t *)data->cd_raw.iov_base + data->cd_offset, - data->cd_length); - break; - case CRYPTO_DATA_UIO: - ret = sha2_digest_update_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx, - data); - break; - default: - ret = CRYPTO_ARGUMENTS_BAD; - } - - return (ret); -} - -static int -sha2_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest) -{ - int ret = CRYPTO_SUCCESS; - uint_t sha_digest_len; - - ASSERT(ctx->cc_provider_private != NULL); - - switch (PROV_SHA2_CTX(ctx)->sc_mech_type) { - case SHA256_MECH_INFO_TYPE: - sha_digest_len = SHA256_DIGEST_LENGTH; - break; - case SHA384_MECH_INFO_TYPE: - sha_digest_len = SHA384_DIGEST_LENGTH; - break; - case SHA512_MECH_INFO_TYPE: - sha_digest_len = SHA512_DIGEST_LENGTH; - break; - default: - return (CRYPTO_MECHANISM_INVALID); - } - - /* - * We need to just return the length needed to store the output. - * We should not destroy the context for the following cases. - */ - if ((digest->cd_length == 0) || - (digest->cd_length < sha_digest_len)) { - digest->cd_length = sha_digest_len; - return (CRYPTO_BUFFER_TOO_SMALL); - } - - /* - * Do a SHA2 final. - */ - switch (digest->cd_format) { - case CRYPTO_DATA_RAW: - SHA2Final((unsigned char *)digest->cd_raw.iov_base + - digest->cd_offset, &PROV_SHA2_CTX(ctx)->sc_sha2_ctx); - break; - case CRYPTO_DATA_UIO: - ret = sha2_digest_final_uio(&PROV_SHA2_CTX(ctx)->sc_sha2_ctx, - digest, sha_digest_len, NULL); - break; - default: - ret = CRYPTO_ARGUMENTS_BAD; - } - - /* all done, free context and return */ - - if (ret == CRYPTO_SUCCESS) - digest->cd_length = sha_digest_len; - else - digest->cd_length = 0; - - kmem_free(ctx->cc_provider_private, sizeof (sha2_ctx_t)); - ctx->cc_provider_private = NULL; - - return (ret); -} - -static int -sha2_digest_atomic(crypto_mechanism_t *mechanism, crypto_data_t *data, - crypto_data_t *digest) -{ - int ret = CRYPTO_SUCCESS; - SHA2_CTX sha2_ctx; - uint32_t sha_digest_len; - - /* - * Do the SHA inits. - */ - - SHA2Init(mechanism->cm_type, &sha2_ctx); - - switch (data->cd_format) { - case CRYPTO_DATA_RAW: - SHA2Update(&sha2_ctx, (uint8_t *)data-> - cd_raw.iov_base + data->cd_offset, data->cd_length); - break; - case CRYPTO_DATA_UIO: - ret = sha2_digest_update_uio(&sha2_ctx, data); - break; - default: - ret = CRYPTO_ARGUMENTS_BAD; - } - - /* - * Do the SHA updates on the specified input data. 
- */ - - if (ret != CRYPTO_SUCCESS) { - /* the update failed, bail */ - digest->cd_length = 0; - return (ret); - } - - if (mechanism->cm_type <= SHA256_HMAC_GEN_MECH_INFO_TYPE) - sha_digest_len = SHA256_DIGEST_LENGTH; - else - sha_digest_len = SHA512_DIGEST_LENGTH; - - /* - * Do a SHA2 final, must be done separately since the digest - * type can be different than the input data type. - */ - switch (digest->cd_format) { - case CRYPTO_DATA_RAW: - SHA2Final((unsigned char *)digest->cd_raw.iov_base + - digest->cd_offset, &sha2_ctx); - break; - case CRYPTO_DATA_UIO: - ret = sha2_digest_final_uio(&sha2_ctx, digest, - sha_digest_len, NULL); - break; - default: - ret = CRYPTO_ARGUMENTS_BAD; - } - - if (ret == CRYPTO_SUCCESS) - digest->cd_length = sha_digest_len; - else - digest->cd_length = 0; - - return (ret); -} - /* * KCF software provider mac entry points. * * SHA2 HMAC is: SHA2(key XOR opad, SHA2(key XOR ipad, text)) * * Init: * The initialization routine initializes what we denote * as the inner and outer contexts by doing * - for inner context: SHA2(key XOR ipad) * - for outer context: SHA2(key XOR opad) * * Update: * Each subsequent SHA2 HMAC update will result in an * update of the inner context with the specified data. * * Final: * The SHA2 HMAC final will do a SHA2 final operation on the * inner context, and the resulting digest will be used * as the data for an update on the outer context. Last * but not least, a SHA2 final on the outer context will * be performed to obtain the SHA2 HMAC digest to return * to the user. */ /* * Initialize a SHA2-HMAC context. */ static void sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes) { uint64_t ipad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)] = {0}; uint64_t opad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)] = {0}; int i, block_size, blocks_per_int64; /* Determine the block size */ if (ctx->hc_mech_type <= SHA256_HMAC_GEN_MECH_INFO_TYPE) { block_size = SHA256_HMAC_BLOCK_SIZE; blocks_per_int64 = SHA256_HMAC_BLOCK_SIZE / sizeof (uint64_t); } else { block_size = SHA512_HMAC_BLOCK_SIZE; blocks_per_int64 = SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t); } (void) memset(ipad, 0, block_size); (void) memset(opad, 0, block_size); if (keyval != NULL) { (void) memcpy(ipad, keyval, length_in_bytes); (void) memcpy(opad, keyval, length_in_bytes); } else { ASSERT0(length_in_bytes); } /* XOR key with ipad (0x36) and opad (0x5c) */ for (i = 0; i < blocks_per_int64; i ++) { ipad[i] ^= 0x3636363636363636; opad[i] ^= 0x5c5c5c5c5c5c5c5c; } /* perform SHA2 on ipad */ SHA2Init(ctx->hc_mech_type, &ctx->hc_icontext); SHA2Update(&ctx->hc_icontext, (uint8_t *)ipad, block_size); /* perform SHA2 on opad */ SHA2Init(ctx->hc_mech_type, &ctx->hc_ocontext); SHA2Update(&ctx->hc_ocontext, (uint8_t *)opad, block_size); } /* */ static int sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_spi_ctx_template_t ctx_template) { int ret = CRYPTO_SUCCESS; uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length); uint_t sha_digest_len, sha_hmac_block_size; /* * Set the digest length and block size to values appropriate to the * mechanism */ switch (mechanism->cm_type) { case SHA256_HMAC_MECH_INFO_TYPE: case SHA256_HMAC_GEN_MECH_INFO_TYPE: sha_digest_len = SHA256_DIGEST_LENGTH; sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE; break; case SHA384_HMAC_MECH_INFO_TYPE: case SHA384_HMAC_GEN_MECH_INFO_TYPE: case SHA512_HMAC_MECH_INFO_TYPE: case SHA512_HMAC_GEN_MECH_INFO_TYPE: sha_digest_len = SHA512_DIGEST_LENGTH; sha_hmac_block_size = 
SHA512_HMAC_BLOCK_SIZE; break; default: return (CRYPTO_MECHANISM_INVALID); } ctx->cc_provider_private = kmem_alloc(sizeof (sha2_hmac_ctx_t), KM_SLEEP); if (ctx->cc_provider_private == NULL) return (CRYPTO_HOST_MEMORY); PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type; if (ctx_template != NULL) { /* reuse context template */ memcpy(PROV_SHA2_HMAC_CTX(ctx), ctx_template, sizeof (sha2_hmac_ctx_t)); } else { /* no context template, compute context */ if (keylen_in_bytes > sha_hmac_block_size) { uchar_t digested_key[SHA512_DIGEST_LENGTH]; sha2_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private; /* * Hash the passed-in key to get a smaller key. * The inner context is used since it hasn't been * initialized yet. */ PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3, &hmac_ctx->hc_icontext, key->ck_data, keylen_in_bytes, digested_key); sha2_mac_init_ctx(PROV_SHA2_HMAC_CTX(ctx), digested_key, sha_digest_len); } else { sha2_mac_init_ctx(PROV_SHA2_HMAC_CTX(ctx), key->ck_data, keylen_in_bytes); } } /* * Get the mechanism parameters, if applicable. */ if (mechanism->cm_type % 3 == 2) { if (mechanism->cm_param == NULL || mechanism->cm_param_len != sizeof (ulong_t)) { ret = CRYPTO_MECHANISM_PARAM_INVALID; } else { PROV_SHA2_GET_DIGEST_LEN(mechanism, PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len); if (PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len > sha_digest_len) ret = CRYPTO_MECHANISM_PARAM_INVALID; } } if (ret != CRYPTO_SUCCESS) { memset(ctx->cc_provider_private, 0, sizeof (sha2_hmac_ctx_t)); kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t)); ctx->cc_provider_private = NULL; } return (ret); } static int sha2_mac_update(crypto_ctx_t *ctx, crypto_data_t *data) { int ret = CRYPTO_SUCCESS; ASSERT(ctx->cc_provider_private != NULL); /* * Do a SHA2 update of the inner context using the specified * data. */ switch (data->cd_format) { case CRYPTO_DATA_RAW: SHA2Update(&PROV_SHA2_HMAC_CTX(ctx)->hc_icontext, (uint8_t *)data->cd_raw.iov_base + data->cd_offset, data->cd_length); break; case CRYPTO_DATA_UIO: ret = sha2_digest_update_uio( &PROV_SHA2_HMAC_CTX(ctx)->hc_icontext, data); break; default: ret = CRYPTO_ARGUMENTS_BAD; } return (ret); } static int sha2_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac) { int ret = CRYPTO_SUCCESS; uchar_t digest[SHA512_DIGEST_LENGTH]; uint32_t digest_len, sha_digest_len; ASSERT(ctx->cc_provider_private != NULL); /* Set the digest lengths to values appropriate to the mechanism */ switch (PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type) { case SHA256_HMAC_MECH_INFO_TYPE: sha_digest_len = digest_len = SHA256_DIGEST_LENGTH; break; case SHA384_HMAC_MECH_INFO_TYPE: sha_digest_len = digest_len = SHA384_DIGEST_LENGTH; break; case SHA512_HMAC_MECH_INFO_TYPE: sha_digest_len = digest_len = SHA512_DIGEST_LENGTH; break; case SHA256_HMAC_GEN_MECH_INFO_TYPE: sha_digest_len = SHA256_DIGEST_LENGTH; digest_len = PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len; break; case SHA384_HMAC_GEN_MECH_INFO_TYPE: case SHA512_HMAC_GEN_MECH_INFO_TYPE: sha_digest_len = SHA512_DIGEST_LENGTH; digest_len = PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len; break; default: return (CRYPTO_ARGUMENTS_BAD); } /* * We need to just return the length needed to store the output. * We should not destroy the context for the following cases. */ if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) { mac->cd_length = digest_len; return (CRYPTO_BUFFER_TOO_SMALL); } /* * Do a SHA2 final on the inner context. */ SHA2Final(digest, &PROV_SHA2_HMAC_CTX(ctx)->hc_icontext); /* * Do a SHA2 update on the outer context, feeding the inner * digest as data. 
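Reviewer note: the init/update/final flow above implements the standard HMAC construction described in the block comment (SHA2(key XOR opad, SHA2(key XOR ipad, text))). A compact single-shot sketch, written against a hypothetical incremental sha256_* interface in place of the driver's SHA2Init/SHA2Update/SHA2Final (declarations only, not a real library API):

#include <stdint.h>
#include <string.h>

/*
 * Hypothetical incremental SHA-256 interface; stands in for the
 * SHA2Init/SHA2Update/SHA2Final calls used in this file.
 */
typedef struct sha256_ctx sha256_ctx_t;
void sha256_init(sha256_ctx_t *);
void sha256_update(sha256_ctx_t *, const void *, size_t);
void sha256_final(sha256_ctx_t *, uint8_t out[32]);

#define	HMAC_BLOCK	64	/* SHA-256 HMAC block size, in bytes */

/* HMAC(key, msg) = H((key ^ opad) || H((key ^ ipad) || msg)) */
static void
hmac_sha256(const uint8_t *key, size_t keylen,
    const uint8_t *msg, size_t msglen, uint8_t mac[32])
{
	uint8_t k[HMAC_BLOCK] = { 0 }, pad[HMAC_BLOCK], inner[32];
	sha256_ctx_t ctx;
	int i;

	if (keylen > HMAC_BLOCK) {	/* over-long keys are hashed first */
		sha256_init(&ctx);
		sha256_update(&ctx, key, keylen);
		sha256_final(&ctx, k);	/* 32-byte digested key */
	} else {
		memcpy(k, key, keylen);	/* short keys are zero-padded */
	}

	for (i = 0; i < HMAC_BLOCK; i++)	/* inner hash: key ^ 0x36 */
		pad[i] = k[i] ^ 0x36;
	sha256_init(&ctx);
	sha256_update(&ctx, pad, HMAC_BLOCK);
	sha256_update(&ctx, msg, msglen);
	sha256_final(&ctx, inner);

	for (i = 0; i < HMAC_BLOCK; i++)	/* outer hash: key ^ 0x5c */
		pad[i] = k[i] ^ 0x5c;
	sha256_init(&ctx);
	sha256_update(&ctx, pad, HMAC_BLOCK);
	sha256_update(&ctx, inner, sizeof (inner));
	sha256_final(&ctx, mac);
}

The driver's sha2_mac_init_ctx() stops after the two pad updates and keeps the resulting hc_icontext/hc_ocontext, so each subsequent MAC costs only the message plus one extra compression for the outer hash.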
*/ SHA2Update(&PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext, digest, sha_digest_len); /* * Do a SHA2 final on the outer context, storing the computed * digest in the users buffer. */ switch (mac->cd_format) { case CRYPTO_DATA_RAW: if (digest_len != sha_digest_len) { /* * The caller requested a short digest. Digest * into a scratch buffer and return to * the user only what was requested. */ SHA2Final(digest, &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext); memcpy((unsigned char *)mac->cd_raw.iov_base + mac->cd_offset, digest, digest_len); } else { SHA2Final((unsigned char *)mac->cd_raw.iov_base + mac->cd_offset, &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext); } break; case CRYPTO_DATA_UIO: ret = sha2_digest_final_uio( &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext, mac, digest_len, digest); break; default: ret = CRYPTO_ARGUMENTS_BAD; } if (ret == CRYPTO_SUCCESS) mac->cd_length = digest_len; else mac->cd_length = 0; memset(ctx->cc_provider_private, 0, sizeof (sha2_hmac_ctx_t)); kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t)); ctx->cc_provider_private = NULL; return (ret); } #define SHA2_MAC_UPDATE(data, ctx, ret) { \ switch (data->cd_format) { \ case CRYPTO_DATA_RAW: \ SHA2Update(&(ctx).hc_icontext, \ (uint8_t *)data->cd_raw.iov_base + \ data->cd_offset, data->cd_length); \ break; \ case CRYPTO_DATA_UIO: \ ret = sha2_digest_update_uio(&(ctx).hc_icontext, data); \ break; \ default: \ ret = CRYPTO_ARGUMENTS_BAD; \ } \ } static int sha2_mac_atomic(crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac, crypto_spi_ctx_template_t ctx_template) { int ret = CRYPTO_SUCCESS; uchar_t digest[SHA512_DIGEST_LENGTH]; sha2_hmac_ctx_t sha2_hmac_ctx; uint32_t sha_digest_len, digest_len, sha_hmac_block_size; uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length); /* * Set the digest length and block size to values appropriate to the * mechanism */ switch (mechanism->cm_type) { case SHA256_HMAC_MECH_INFO_TYPE: case SHA256_HMAC_GEN_MECH_INFO_TYPE: sha_digest_len = digest_len = SHA256_DIGEST_LENGTH; sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE; break; case SHA384_HMAC_MECH_INFO_TYPE: case SHA384_HMAC_GEN_MECH_INFO_TYPE: case SHA512_HMAC_MECH_INFO_TYPE: case SHA512_HMAC_GEN_MECH_INFO_TYPE: sha_digest_len = digest_len = SHA512_DIGEST_LENGTH; sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE; break; default: return (CRYPTO_MECHANISM_INVALID); } if (ctx_template != NULL) { /* reuse context template */ memcpy(&sha2_hmac_ctx, ctx_template, sizeof (sha2_hmac_ctx_t)); } else { sha2_hmac_ctx.hc_mech_type = mechanism->cm_type; /* no context template, initialize context */ if (keylen_in_bytes > sha_hmac_block_size) { /* * Hash the passed-in key to get a smaller key. * The inner context is used since it hasn't been * initialized yet.
*/ PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3, &sha2_hmac_ctx.hc_icontext, key->ck_data, keylen_in_bytes, digest); sha2_mac_init_ctx(&sha2_hmac_ctx, digest, sha_digest_len); } else { sha2_mac_init_ctx(&sha2_hmac_ctx, key->ck_data, keylen_in_bytes); } } /* get the mechanism parameters, if applicable */ if ((mechanism->cm_type % 3) == 2) { if (mechanism->cm_param == NULL || mechanism->cm_param_len != sizeof (ulong_t)) { ret = CRYPTO_MECHANISM_PARAM_INVALID; goto bail; } PROV_SHA2_GET_DIGEST_LEN(mechanism, digest_len); if (digest_len > sha_digest_len) { ret = CRYPTO_MECHANISM_PARAM_INVALID; goto bail; } } /* do a SHA2 update of the inner context using the specified data */ SHA2_MAC_UPDATE(data, sha2_hmac_ctx, ret); if (ret != CRYPTO_SUCCESS) /* the update failed, free context and bail */ goto bail; /* * Do a SHA2 final on the inner context. */ SHA2Final(digest, &sha2_hmac_ctx.hc_icontext); /* * Do an SHA2 update on the outer context, feeding the inner * digest as data. * * HMAC-SHA384 needs special handling as the outer hash needs only 48 * bytes of the inner hash value. */ if (mechanism->cm_type == SHA384_HMAC_MECH_INFO_TYPE || mechanism->cm_type == SHA384_HMAC_GEN_MECH_INFO_TYPE) SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, SHA384_DIGEST_LENGTH); else SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len); /* * Do a SHA2 final on the outer context, storing the computed * digest in the users buffer. */ switch (mac->cd_format) { case CRYPTO_DATA_RAW: if (digest_len != sha_digest_len) { /* * The caller requested a short digest. Digest * into a scratch buffer and return to * the user only what was requested. */ SHA2Final(digest, &sha2_hmac_ctx.hc_ocontext); memcpy((unsigned char *)mac->cd_raw.iov_base + mac->cd_offset, digest, digest_len); } else { SHA2Final((unsigned char *)mac->cd_raw.iov_base + mac->cd_offset, &sha2_hmac_ctx.hc_ocontext); } break; case CRYPTO_DATA_UIO: ret = sha2_digest_final_uio(&sha2_hmac_ctx.hc_ocontext, mac, digest_len, digest); break; default: ret = CRYPTO_ARGUMENTS_BAD; } if (ret == CRYPTO_SUCCESS) { mac->cd_length = digest_len; return (CRYPTO_SUCCESS); } bail: memset(&sha2_hmac_ctx, 0, sizeof (sha2_hmac_ctx_t)); mac->cd_length = 0; return (ret); } static int sha2_mac_verify_atomic(crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac, crypto_spi_ctx_template_t ctx_template) { int ret = CRYPTO_SUCCESS; uchar_t digest[SHA512_DIGEST_LENGTH]; sha2_hmac_ctx_t sha2_hmac_ctx; uint32_t sha_digest_len, digest_len, sha_hmac_block_size; uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length); /* * Set the digest length and block size to values appropriate to the * mechanism */ switch (mechanism->cm_type) { case SHA256_HMAC_MECH_INFO_TYPE: case SHA256_HMAC_GEN_MECH_INFO_TYPE: sha_digest_len = digest_len = SHA256_DIGEST_LENGTH; sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE; break; case SHA384_HMAC_MECH_INFO_TYPE: case SHA384_HMAC_GEN_MECH_INFO_TYPE: case SHA512_HMAC_MECH_INFO_TYPE: case SHA512_HMAC_GEN_MECH_INFO_TYPE: sha_digest_len = digest_len = SHA512_DIGEST_LENGTH; sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE; break; default: return (CRYPTO_MECHANISM_INVALID); } if (ctx_template != NULL) { /* reuse context template */ memcpy(&sha2_hmac_ctx, ctx_template, sizeof (sha2_hmac_ctx_t)); } else { sha2_hmac_ctx.hc_mech_type = mechanism->cm_type; /* no context template, initialize context */ if (keylen_in_bytes > sha_hmac_block_size) { /* * Hash the passed-in key to get a smaller key. 
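Reviewer note: the cm_type % 3 and cm_type / 3 arithmetic here and in sha2_mac_init() leans on the mechanism enum laying out three consecutive entries per hash family; that is presumably also why the plain digest entries stay in sha2_mech_info_tab with empty usage flags even though their entry points are gone, since dropping them would renumber the enum. An illustrative layout (values assumed 0-based, as the arithmetic implies; names hypothetical):

/*
 * Sketch of the mechanism ordering the % 3 and / 3 arithmetic depends on:
 * three consecutive entries per hash family.
 */
enum {
	MECH_SHA256,		/* 0: % 3 == 0 -> plain digest (flags now 0) */
	MECH_SHA256_HMAC,	/* 1: % 3 == 1 -> HMAC */
	MECH_SHA256_HMAC_GEN,	/* 2: % 3 == 2 -> HMAC w/ caller digest len */
	MECH_SHA384,		/* 3: / 3 == 1 -> SHA-384 family */
	MECH_SHA384_HMAC,	/* 4 */
	MECH_SHA384_HMAC_GEN,	/* 5 */
	MECH_SHA512,		/* 6: / 3 == 2 -> SHA-512 family */
	MECH_SHA512_HMAC,	/* 7 */
	MECH_SHA512_HMAC_GEN	/* 8 */
};

The HMAC-SHA384 special case above follows from SHA-384 running on SHA-512 internals (128-byte blocks, 64-byte internal digest): only the first SHA384_DIGEST_LENGTH (48) bytes of the inner hash feed the outer hash.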
* The inner context is used since it hasn't been * initialized yet. */ PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3, &sha2_hmac_ctx.hc_icontext, key->ck_data, keylen_in_bytes, digest); sha2_mac_init_ctx(&sha2_hmac_ctx, digest, sha_digest_len); } else { sha2_mac_init_ctx(&sha2_hmac_ctx, key->ck_data, keylen_in_bytes); } } /* get the mechanism parameters, if applicable */ if (mechanism->cm_type % 3 == 2) { if (mechanism->cm_param == NULL || mechanism->cm_param_len != sizeof (ulong_t)) { ret = CRYPTO_MECHANISM_PARAM_INVALID; goto bail; } PROV_SHA2_GET_DIGEST_LEN(mechanism, digest_len); if (digest_len > sha_digest_len) { ret = CRYPTO_MECHANISM_PARAM_INVALID; goto bail; } } if (mac->cd_length != digest_len) { ret = CRYPTO_INVALID_MAC; goto bail; } /* do a SHA2 update of the inner context using the specified data */ SHA2_MAC_UPDATE(data, sha2_hmac_ctx, ret); if (ret != CRYPTO_SUCCESS) /* the update failed, free context and bail */ goto bail; /* do a SHA2 final on the inner context */ SHA2Final(digest, &sha2_hmac_ctx.hc_icontext); /* * Do an SHA2 update on the outer context, feeding the inner * digest as data. * * HMAC-SHA384 needs special handling as the outer hash needs only 48 * bytes of the inner hash value. */ if (mechanism->cm_type == SHA384_HMAC_MECH_INFO_TYPE || mechanism->cm_type == SHA384_HMAC_GEN_MECH_INFO_TYPE) SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, SHA384_DIGEST_LENGTH); else SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len); /* * Do a SHA2 final on the outer context, storing the computed * digest in the users buffer. */ SHA2Final(digest, &sha2_hmac_ctx.hc_ocontext); /* * Compare the computed digest against the expected digest passed * as argument. */ switch (mac->cd_format) { case CRYPTO_DATA_RAW: if (memcmp(digest, (unsigned char *)mac->cd_raw.iov_base + mac->cd_offset, digest_len) != 0) ret = CRYPTO_INVALID_MAC; break; case CRYPTO_DATA_UIO: { off_t offset = mac->cd_offset; uint_t vec_idx = 0; off_t scratch_offset = 0; size_t length = digest_len; size_t cur_len; /* we support only kernel buffer */ if (zfs_uio_segflg(mac->cd_uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* jump to the first iovec containing the expected digest */ offset = zfs_uio_index_at_offset(mac->cd_uio, offset, &vec_idx); if (vec_idx == zfs_uio_iovcnt(mac->cd_uio)) { /* * The caller specified an offset that is * larger than the total size of the buffers * it provided. */ ret = CRYPTO_DATA_LEN_RANGE; break; } /* do the comparison of computed digest vs specified one */ while (vec_idx < zfs_uio_iovcnt(mac->cd_uio) && length > 0) { cur_len = MIN(zfs_uio_iovlen(mac->cd_uio, vec_idx) - offset, length); if (memcmp(digest + scratch_offset, zfs_uio_iovbase(mac->cd_uio, vec_idx) + offset, cur_len) != 0) { ret = CRYPTO_INVALID_MAC; break; } length -= cur_len; vec_idx++; scratch_offset += cur_len; offset = 0; } break; } default: ret = CRYPTO_ARGUMENTS_BAD; } return (ret); bail: memset(&sha2_hmac_ctx, 0, sizeof (sha2_hmac_ctx_t)); mac->cd_length = 0; return (ret); } /* * KCF software provider context management entry points. 
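Reviewer note: the context-management entry points that follow exist so the two pad hashes are paid once per key; sha2_mac_init() and the atomic paths then just memcpy() the saved contexts. A sketch of that precompute-per-key, copy-per-message pattern under a hypothetical copyable hash state (stands in for SHA2_CTX; the hash_* routines are assumed, as in the earlier HMAC sketch):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical copyable hash state and incremental interface. */
typedef struct { uint64_t state[8]; uint8_t buf[128]; uint64_t count; } hctx_t;
void hash_update(hctx_t *, const void *, size_t);
void hash_final(hctx_t *, uint8_t out[32]);

/* A context template: inner/outer states precomputed once per key. */
typedef struct {
	hctx_t inner;	/* state after absorbing key ^ ipad */
	hctx_t outer;	/* state after absorbing key ^ opad */
} hmac_tmpl_t;

/* Per-message MAC: copy the template, hash only the message. */
static void
mac_with_template(const hmac_tmpl_t *t, const uint8_t *msg, size_t len,
    uint8_t mac[32])
{
	hctx_t c = t->inner;	/* struct copy = the driver's memcpy() */
	uint8_t inner_digest[32];

	hash_update(&c, msg, len);
	hash_final(&c, inner_digest);

	c = t->outer;
	hash_update(&c, inner_digest, sizeof (inner_digest));
	hash_final(&c, mac);
}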
*/ static int sha2_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size) { sha2_hmac_ctx_t *sha2_hmac_ctx_tmpl; uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length); uint32_t sha_digest_len, sha_hmac_block_size; /* * Set the digest length and block size to values appropriate to the * mechanism */ switch (mechanism->cm_type) { case SHA256_HMAC_MECH_INFO_TYPE: case SHA256_HMAC_GEN_MECH_INFO_TYPE: sha_digest_len = SHA256_DIGEST_LENGTH; sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE; break; case SHA384_HMAC_MECH_INFO_TYPE: case SHA384_HMAC_GEN_MECH_INFO_TYPE: case SHA512_HMAC_MECH_INFO_TYPE: case SHA512_HMAC_GEN_MECH_INFO_TYPE: sha_digest_len = SHA512_DIGEST_LENGTH; sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE; break; default: return (CRYPTO_MECHANISM_INVALID); } /* * Allocate and initialize SHA2 context. */ sha2_hmac_ctx_tmpl = kmem_alloc(sizeof (sha2_hmac_ctx_t), KM_SLEEP); if (sha2_hmac_ctx_tmpl == NULL) return (CRYPTO_HOST_MEMORY); sha2_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type; if (keylen_in_bytes > sha_hmac_block_size) { uchar_t digested_key[SHA512_DIGEST_LENGTH]; /* * Hash the passed-in key to get a smaller key. * The inner context is used since it hasn't been * initialized yet. */ PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3, &sha2_hmac_ctx_tmpl->hc_icontext, key->ck_data, keylen_in_bytes, digested_key); sha2_mac_init_ctx(sha2_hmac_ctx_tmpl, digested_key, sha_digest_len); } else { sha2_mac_init_ctx(sha2_hmac_ctx_tmpl, key->ck_data, keylen_in_bytes); } *ctx_template = (crypto_spi_ctx_template_t)sha2_hmac_ctx_tmpl; *ctx_template_size = sizeof (sha2_hmac_ctx_t); return (CRYPTO_SUCCESS); } static int sha2_free_context(crypto_ctx_t *ctx) { uint_t ctx_len; if (ctx->cc_provider_private == NULL) return (CRYPTO_SUCCESS); /* * We have to free either SHA2 or SHA2-HMAC contexts, which * have different lengths. * * Note: Below is dependent on the mechanism ordering. */ if (PROV_SHA2_CTX(ctx)->sc_mech_type % 3 == 0) ctx_len = sizeof (sha2_ctx_t); else ctx_len = sizeof (sha2_hmac_ctx_t); memset(ctx->cc_provider_private, 0, ctx_len); kmem_free(ctx->cc_provider_private, ctx_len); ctx->cc_provider_private = NULL; return (CRYPTO_SUCCESS); } diff --git a/module/icp/io/skein_mod.c b/module/icp/io/skein_mod.c index 221e1debd45b..3e969513be6e 100644 --- a/module/icp/io/skein_mod.c +++ b/module/icp/io/skein_mod.c @@ -1,656 +1,515 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2013 Saso Kiselkov. All rights reserved. 
*/ #include #include #include #include #define SKEIN_MODULE_IMPL #include static const crypto_mech_info_t skein_mech_info_tab[] = { - {CKM_SKEIN_256, SKEIN_256_MECH_INFO_TYPE, - CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC}, {CKM_SKEIN_256_MAC, SKEIN_256_MAC_MECH_INFO_TYPE, CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC}, - {CKM_SKEIN_512, SKEIN_512_MECH_INFO_TYPE, - CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC}, {CKM_SKEIN_512_MAC, SKEIN_512_MAC_MECH_INFO_TYPE, CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC}, - {CKM_SKEIN1024, SKEIN1024_MECH_INFO_TYPE, - CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC}, {CKM_SKEIN1024_MAC, SKEIN1024_MAC_MECH_INFO_TYPE, CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC}, }; -static int skein_digest_init(crypto_ctx_t *, crypto_mechanism_t *); -static int skein_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *); static int skein_update(crypto_ctx_t *, crypto_data_t *); static int skein_final(crypto_ctx_t *, crypto_data_t *); -static int skein_digest_atomic(crypto_mechanism_t *, crypto_data_t *, - crypto_data_t *); - -static const crypto_digest_ops_t skein_digest_ops = { - .digest_init = skein_digest_init, - .digest = skein_digest, - .digest_update = skein_update, - .digest_final = skein_final, - .digest_atomic = skein_digest_atomic -}; static int skein_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t); static int skein_mac_atomic(crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t); static const crypto_mac_ops_t skein_mac_ops = { .mac_init = skein_mac_init, .mac = NULL, .mac_update = skein_update, /* using regular digest update is OK here */ .mac_final = skein_final, /* using regular digest final is OK here */ .mac_atomic = skein_mac_atomic, .mac_verify_atomic = NULL }; static int skein_create_ctx_template(crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *, size_t *); static int skein_free_context(crypto_ctx_t *); static const crypto_ctx_ops_t skein_ctx_ops = { .create_ctx_template = skein_create_ctx_template, .free_context = skein_free_context }; static const crypto_ops_t skein_crypto_ops = { - &skein_digest_ops, NULL, &skein_mac_ops, &skein_ctx_ops, }; static const crypto_provider_info_t skein_prov_info = { "Skein Software Provider", &skein_crypto_ops, sizeof (skein_mech_info_tab) / sizeof (crypto_mech_info_t), skein_mech_info_tab }; static crypto_kcf_provider_handle_t skein_prov_handle = 0; typedef struct skein_ctx { skein_mech_type_t sc_mech_type; size_t sc_digest_bitlen; /*LINTED(E_ANONYMOUS_UNION_DECL)*/ union { Skein_256_Ctxt_t sc_256; Skein_512_Ctxt_t sc_512; Skein1024_Ctxt_t sc_1024; }; } skein_ctx_t; #define SKEIN_CTX(_ctx_) ((skein_ctx_t *)((_ctx_)->cc_provider_private)) #define SKEIN_CTX_LVALUE(_ctx_) (_ctx_)->cc_provider_private #define SKEIN_OP(_skein_ctx, _op, ...) 
\ do { \ skein_ctx_t *sc = (_skein_ctx); \ switch (sc->sc_mech_type) { \ - case SKEIN_256_MECH_INFO_TYPE: \ case SKEIN_256_MAC_MECH_INFO_TYPE: \ (void) Skein_256_ ## _op(&sc->sc_256, __VA_ARGS__);\ break; \ - case SKEIN_512_MECH_INFO_TYPE: \ case SKEIN_512_MAC_MECH_INFO_TYPE: \ (void) Skein_512_ ## _op(&sc->sc_512, __VA_ARGS__);\ break; \ - case SKEIN1024_MECH_INFO_TYPE: \ case SKEIN1024_MAC_MECH_INFO_TYPE: \ (void) Skein1024_ ## _op(&sc->sc_1024, __VA_ARGS__);\ break; \ } \ } while (0) static int skein_get_digest_bitlen(const crypto_mechanism_t *mechanism, size_t *result) { if (mechanism->cm_param != NULL) { /*LINTED(E_BAD_PTR_CAST_ALIGN)*/ skein_param_t *param = (skein_param_t *)mechanism->cm_param; if (mechanism->cm_param_len != sizeof (*param) || param->sp_digest_bitlen == 0) { return (CRYPTO_MECHANISM_PARAM_INVALID); } *result = param->sp_digest_bitlen; } else { - switch (mechanism->cm_type) { - case SKEIN_256_MECH_INFO_TYPE: - *result = 256; - break; - case SKEIN_512_MECH_INFO_TYPE: - *result = 512; - break; - case SKEIN1024_MECH_INFO_TYPE: - *result = 1024; - break; - default: - return (CRYPTO_MECHANISM_INVALID); - } + return (CRYPTO_MECHANISM_INVALID); } return (CRYPTO_SUCCESS); } int skein_mod_init(void) { /* * Try to register with KCF - failure shouldn't unload us, since we * still may want to continue providing misc/skein functionality. */ (void) crypto_register_provider(&skein_prov_info, &skein_prov_handle); return (0); } int skein_mod_fini(void) { int ret = 0; if (skein_prov_handle != 0) { if ((ret = crypto_unregister_provider(skein_prov_handle)) != CRYPTO_SUCCESS) { cmn_err(CE_WARN, "skein _fini: crypto_unregister_provider() " "failed (0x%x)", ret); return (EBUSY); } skein_prov_handle = 0; } return (0); } /* * General Skein hashing helper functions. */ /* * Performs an Update on a context with uio input data. */ static int skein_digest_update_uio(skein_ctx_t *ctx, const crypto_data_t *data) { off_t offset = data->cd_offset; size_t length = data->cd_length; uint_t vec_idx = 0; size_t cur_len; zfs_uio_t *uio = data->cd_uio; /* we support only kernel buffer */ if (zfs_uio_segflg(uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing data to be * digested. */ offset = zfs_uio_index_at_offset(uio, offset, &vec_idx); if (vec_idx == zfs_uio_iovcnt(uio)) { /* * The caller specified an offset that is larger than the * total size of the buffers it provided. */ return (CRYPTO_DATA_LEN_RANGE); } /* * Now do the digesting on the iovecs. */ while (vec_idx < zfs_uio_iovcnt(uio) && length > 0) { cur_len = MIN(zfs_uio_iovlen(uio, vec_idx) - offset, length); SKEIN_OP(ctx, Update, (uint8_t *)zfs_uio_iovbase(uio, vec_idx) + offset, cur_len); length -= cur_len; vec_idx++; offset = 0; } if (vec_idx == zfs_uio_iovcnt(uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. * The caller requested to digest more data than it provided. */ return (CRYPTO_DATA_LEN_RANGE); } return (CRYPTO_SUCCESS); } /* * Performs a Final on a context and writes to a uio digest output. */ static int skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest) { off_t offset = digest->cd_offset; uint_t vec_idx = 0; zfs_uio_t *uio = digest->cd_uio; /* we support only kernel buffer */ if (zfs_uio_segflg(uio) != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* * Jump to the first iovec containing ptr to the digest to be returned. 
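Reviewer note: SKEIN_OP() above dispatches to the Skein_256_/Skein_512_/Skein1024_ functions by token-pasting the operation name onto a per-width prefix. A self-contained toy of the same pattern with two dummy context types (all names hypothetical):

#include <stdio.h>

/* Toy stand-ins for Skein_256_Ctxt_t / Skein_512_Ctxt_t. */
typedef struct { int dummy; } st256_t;
typedef struct { int dummy; } st512_t;

static void
st256_update(st256_t *c, int n)
{
	(void) c;
	(void) printf("256-bit update: %d\n", n);
}

static void
st512_update(st512_t *c, int n)
{
	(void) c;
	(void) printf("512-bit update: %d\n", n);
}

typedef struct {
	int type;		/* 0 = 256-bit, 1 = 512-bit */
	union {
		st256_t s256;
		st512_t s512;
	} u;
} tctx_t;

/* Token pasting builds the callee name: st256_ ## update, etc. */
#define	TCTX_OP(ctx, op, ...)						\
	do {								\
		tctx_t *c = (ctx);					\
		switch (c->type) {					\
		case 0: st256_ ## op(&c->u.s256, __VA_ARGS__); break;	\
		case 1: st512_ ## op(&c->u.s512, __VA_ARGS__); break;	\
		}							\
	} while (0)

int
main(void)
{
	tctx_t c;

	c.type = 1;
	TCTX_OP(&c, update, 42);	/* calls st512_update(&c.u.s512, 42) */
	return (0);
}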
*/ offset = zfs_uio_index_at_offset(uio, offset, &vec_idx); if (vec_idx == zfs_uio_iovcnt(uio)) { /* * The caller specified an offset that is larger than the * total size of the buffers it provided. */ return (CRYPTO_DATA_LEN_RANGE); } if (offset + CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen) <= zfs_uio_iovlen(uio, vec_idx)) { /* The computed digest will fit in the current iovec. */ SKEIN_OP(ctx, Final, (uchar_t *)zfs_uio_iovbase(uio, vec_idx) + offset); } else { uint8_t *digest_tmp; off_t scratch_offset = 0; size_t length = CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen); size_t cur_len; digest_tmp = kmem_alloc(CRYPTO_BITS2BYTES( ctx->sc_digest_bitlen), KM_SLEEP); if (digest_tmp == NULL) return (CRYPTO_HOST_MEMORY); SKEIN_OP(ctx, Final, digest_tmp); while (vec_idx < zfs_uio_iovcnt(uio) && length > 0) { cur_len = MIN(zfs_uio_iovlen(uio, vec_idx) - offset, length); memcpy(zfs_uio_iovbase(uio, vec_idx) + offset, digest_tmp + scratch_offset, cur_len); length -= cur_len; vec_idx++; scratch_offset += cur_len; offset = 0; } kmem_free(digest_tmp, CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen)); if (vec_idx == zfs_uio_iovcnt(uio) && length > 0) { /* * The end of the specified iovec's was reached but * the length requested could not be processed, i.e. * The caller requested to digest more data than it * provided. */ return (CRYPTO_DATA_LEN_RANGE); } } return (CRYPTO_SUCCESS); } /* * KCF software provider digest entry points. */ -/* - * Initializes a skein digest context to the configuration in `mechanism'. - * The mechanism cm_type must be one of SKEIN_*_MECH_INFO_TYPE. The cm_param - * field may contain a skein_param_t structure indicating the length of the - * digest the algorithm should produce. Otherwise the default output lengths - * are applied (32 bytes for Skein-256, 64 bytes for Skein-512 and 128 bytes - * for Skein-1024). - */ -static int -skein_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism) -{ - int error = CRYPTO_SUCCESS; - - if (!VALID_SKEIN_DIGEST_MECH(mechanism->cm_type)) - return (CRYPTO_MECHANISM_INVALID); - - SKEIN_CTX_LVALUE(ctx) = kmem_alloc(sizeof (*SKEIN_CTX(ctx)), KM_SLEEP); - if (SKEIN_CTX(ctx) == NULL) - return (CRYPTO_HOST_MEMORY); - - SKEIN_CTX(ctx)->sc_mech_type = mechanism->cm_type; - error = skein_get_digest_bitlen(mechanism, - &SKEIN_CTX(ctx)->sc_digest_bitlen); - if (error != CRYPTO_SUCCESS) - goto errout; - SKEIN_OP(SKEIN_CTX(ctx), Init, SKEIN_CTX(ctx)->sc_digest_bitlen); - - return (CRYPTO_SUCCESS); -errout: - memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx))); - kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx))); - SKEIN_CTX_LVALUE(ctx) = NULL; - return (error); -} - -/* - * Executes a skein_update and skein_digest on a pre-initialized crypto - * context in a single step. See the documentation to these functions to - * see what to pass here. 
- */ -static int -skein_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest) -{ - int error = CRYPTO_SUCCESS; - - ASSERT(SKEIN_CTX(ctx) != NULL); - - if (digest->cd_length < - CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen)) { - digest->cd_length = - CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen); - return (CRYPTO_BUFFER_TOO_SMALL); - } - - error = skein_update(ctx, data); - if (error != CRYPTO_SUCCESS) { - memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx))); - kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx))); - SKEIN_CTX_LVALUE(ctx) = NULL; - digest->cd_length = 0; - return (error); - } - error = skein_final(ctx, digest); - - return (error); -} - /* * Performs a skein Update with the input message in `data' (successive calls * can push more data). This is used by the MAC entry points. * Supported input data formats are raw and uio. */ static int skein_update(crypto_ctx_t *ctx, crypto_data_t *data) { int error = CRYPTO_SUCCESS; ASSERT(SKEIN_CTX(ctx) != NULL); switch (data->cd_format) { case CRYPTO_DATA_RAW: SKEIN_OP(SKEIN_CTX(ctx), Update, (uint8_t *)data->cd_raw.iov_base + data->cd_offset, data->cd_length); break; case CRYPTO_DATA_UIO: error = skein_digest_update_uio(SKEIN_CTX(ctx), data); break; default: error = CRYPTO_ARGUMENTS_BAD; } return (error); } /* * Performs a skein Final, writing the output to `digest'. This is used by * the MAC entry points. * Supported output digest formats are raw and uio. */ static int skein_final_nofree(crypto_ctx_t *ctx, crypto_data_t *digest) { int error = CRYPTO_SUCCESS; ASSERT(SKEIN_CTX(ctx) != NULL); if (digest->cd_length < CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen)) { digest->cd_length = CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen); return (CRYPTO_BUFFER_TOO_SMALL); } switch (digest->cd_format) { case CRYPTO_DATA_RAW: SKEIN_OP(SKEIN_CTX(ctx), Final, (uint8_t *)digest->cd_raw.iov_base + digest->cd_offset); break; case CRYPTO_DATA_UIO: error = skein_digest_final_uio(SKEIN_CTX(ctx), digest); break; default: error = CRYPTO_ARGUMENTS_BAD; } if (error == CRYPTO_SUCCESS) digest->cd_length = CRYPTO_BITS2BYTES(SKEIN_CTX(ctx)->sc_digest_bitlen); else digest->cd_length = 0; return (error); } static int skein_final(crypto_ctx_t *ctx, crypto_data_t *digest) { int error = skein_final_nofree(ctx, digest); if (error == CRYPTO_BUFFER_TOO_SMALL) return (error); memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx))); kmem_free(SKEIN_CTX(ctx), sizeof (*(SKEIN_CTX(ctx)))); SKEIN_CTX_LVALUE(ctx) = NULL; return (error); } -/* - * Performs a full skein digest computation in a single call, configuring the - * algorithm according to `mechanism', reading the input to be digested from - * `data' and writing the output to `digest'. - * Supported input/output formats are raw, uio and mblk.
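Reviewer note: splitting skein_final() into skein_final_nofree() plus a freeing wrapper preserves a useful contract: on CRYPTO_BUFFER_TOO_SMALL the context survives and cd_length reports the required size, so the caller can resize and retry; every other outcome scrubs and frees the context. A toy analog of that query-then-retry contract (hypothetical names):

#include <stdio.h>
#include <string.h>

enum { TOY_OK, TOY_BUFFER_TOO_SMALL };

/*
 * Toy analog of skein_final(): when the output buffer is too short,
 * report the needed size and leave the (implicit) context intact.
 */
static int
toy_final(size_t need, unsigned char *out, size_t *outlen)
{
	if (*outlen < need) {
		*outlen = need;	/* tell the caller how much to provide */
		return (TOY_BUFFER_TOO_SMALL);	/* context NOT destroyed */
	}
	(void) memset(out, 0xab, need);	/* stand-in for the real digest */
	*outlen = need;
	return (TOY_OK);	/* the real wrapper scrubs and frees here */
}

int
main(void)
{
	unsigned char buf[64];
	size_t len = 0;

	if (toy_final(32, buf, &len) == TOY_BUFFER_TOO_SMALL) {
		/* len is now 32; a real caller would ensure capacity. */
		(void) toy_final(32, buf, &len);
	}
	(void) printf("digest length: %zu\n", len);
	return (0);
}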
- */ -static int -skein_digest_atomic(crypto_mechanism_t *mechanism, crypto_data_t *data, - crypto_data_t *digest) -{ - int error; - skein_ctx_t skein_ctx; - crypto_ctx_t ctx; - SKEIN_CTX_LVALUE(&ctx) = &skein_ctx; - - /* Init */ - if (!VALID_SKEIN_DIGEST_MECH(mechanism->cm_type)) - return (CRYPTO_MECHANISM_INVALID); - skein_ctx.sc_mech_type = mechanism->cm_type; - error = skein_get_digest_bitlen(mechanism, &skein_ctx.sc_digest_bitlen); - if (error != CRYPTO_SUCCESS) - goto out; - SKEIN_OP(&skein_ctx, Init, skein_ctx.sc_digest_bitlen); - - if ((error = skein_update(&ctx, data)) != CRYPTO_SUCCESS) - goto out; - if ((error = skein_final_nofree(&ctx, data)) != CRYPTO_SUCCESS) - goto out; - -out: - if (error == CRYPTO_SUCCESS) - digest->cd_length = - CRYPTO_BITS2BYTES(skein_ctx.sc_digest_bitlen); - else - digest->cd_length = 0; - memset(&skein_ctx, 0, sizeof (skein_ctx)); - - return (error); -} - /* * Helper function that builds a Skein MAC context from the provided * mechanism and key. */ static int skein_mac_ctx_build(skein_ctx_t *ctx, crypto_mechanism_t *mechanism, crypto_key_t *key) { int error; if (!VALID_SKEIN_MAC_MECH(mechanism->cm_type)) return (CRYPTO_MECHANISM_INVALID); ctx->sc_mech_type = mechanism->cm_type; error = skein_get_digest_bitlen(mechanism, &ctx->sc_digest_bitlen); if (error != CRYPTO_SUCCESS) return (error); SKEIN_OP(ctx, InitExt, ctx->sc_digest_bitlen, 0, key->ck_data, CRYPTO_BITS2BYTES(key->ck_length)); return (CRYPTO_SUCCESS); } /* * KCF software provider mac entry points. */ /* * Initializes a skein MAC context. You may pass a ctx_template, in which * case the template will be reused to make initialization more efficient. * Otherwise a new context will be constructed. The mechanism cm_type must * be one of SKEIN_*_MAC_MECH_INFO_TYPE. A skein_param_t must be passed in * cm_param to configure the length of the digest. The key must be in raw * format. */ static int skein_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_spi_ctx_template_t ctx_template) { int error; SKEIN_CTX_LVALUE(ctx) = kmem_alloc(sizeof (*SKEIN_CTX(ctx)), KM_SLEEP); if (SKEIN_CTX(ctx) == NULL) return (CRYPTO_HOST_MEMORY); if (ctx_template != NULL) { memcpy(SKEIN_CTX(ctx), ctx_template, sizeof (*SKEIN_CTX(ctx))); } else { error = skein_mac_ctx_build(SKEIN_CTX(ctx), mechanism, key); if (error != CRYPTO_SUCCESS) goto errout; } return (CRYPTO_SUCCESS); errout: memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx))); kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx))); return (error); } /* * The MAC update and final calls share skein_update() and skein_final() * above. */ /* * Performs an atomic Skein MAC operation in one step. All the same * properties apply to the arguments of this function as to those of the * partial operations above.
*/ static int skein_mac_atomic(crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac, crypto_spi_ctx_template_t ctx_template) { /* faux crypto context just for skein_update() and skein_final_nofree() */ int error; crypto_ctx_t ctx; skein_ctx_t skein_ctx; SKEIN_CTX_LVALUE(&ctx) = &skein_ctx; if (ctx_template != NULL) { memcpy(&skein_ctx, ctx_template, sizeof (skein_ctx)); } else { error = skein_mac_ctx_build(&skein_ctx, mechanism, key); if (error != CRYPTO_SUCCESS) goto errout; } if ((error = skein_update(&ctx, data)) != CRYPTO_SUCCESS) goto errout; if ((error = skein_final_nofree(&ctx, mac)) != CRYPTO_SUCCESS) goto errout; return (CRYPTO_SUCCESS); errout: memset(&skein_ctx, 0, sizeof (skein_ctx)); return (error); } /* * KCF software provider context management entry points. */ /* * Constructs a context template for the Skein MAC algorithm. The same * properties apply to the arguments of this function as to those of * skein_mac_init. */ static int skein_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size) { int error; skein_ctx_t *ctx_tmpl; ctx_tmpl = kmem_alloc(sizeof (*ctx_tmpl), KM_SLEEP); if (ctx_tmpl == NULL) return (CRYPTO_HOST_MEMORY); error = skein_mac_ctx_build(ctx_tmpl, mechanism, key); if (error != CRYPTO_SUCCESS) goto errout; *ctx_template = ctx_tmpl; *ctx_template_size = sizeof (*ctx_tmpl); return (CRYPTO_SUCCESS); errout: memset(ctx_tmpl, 0, sizeof (*ctx_tmpl)); kmem_free(ctx_tmpl, sizeof (*ctx_tmpl)); return (error); } /* * Frees a skein context in a parent crypto context. */ static int skein_free_context(crypto_ctx_t *ctx) { if (SKEIN_CTX(ctx) != NULL) { memset(SKEIN_CTX(ctx), 0, sizeof (*SKEIN_CTX(ctx))); kmem_free(SKEIN_CTX(ctx), sizeof (*SKEIN_CTX(ctx))); SKEIN_CTX_LVALUE(ctx) = NULL; } return (CRYPTO_SUCCESS); }
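Reviewer note: unlike the SHA-2 provider, no HMAC scaffolding is needed here; Skein natively supports keyed hashing, which skein_mac_ctx_build() reaches through InitExt with treeInfo 0 (plain sequential hashing). A userland sketch of the same computation against the public Skein_512_* API from sys/skein.h (assumes linkage against the Skein code; buffer size follows from the 512-bit output):

#include <sys/skein.h>
#include <stdint.h>
#include <string.h>

/*
 * 512-bit Skein MAC over `msg' using Skein's native keyed mode, the
 * same calls skein_mac_ctx_build() makes through SKEIN_OP(); treeInfo 0
 * selects plain sequential hashing.
 */
static int
skein512_mac(const uint8_t *key, size_t keylen,
    const uint8_t *msg, size_t msglen, uint8_t mac[64])
{
	Skein_512_Ctxt_t ctx;

	if (Skein_512_InitExt(&ctx, 512, 0, key, keylen) != SKEIN_SUCCESS)
		return (-1);
	(void) Skein_512_Update(&ctx, msg, msglen);
	(void) Skein_512_Final(&ctx, mac);
	memset(&ctx, 0, sizeof (ctx));	/* scrub key-dependent state */
	return (0);
}

Note that with the digest mechanisms gone, skein_get_digest_bitlen() no longer supplies per-mechanism defaults, so KCF consumers of these MAC mechanisms must always pass a skein_param_t with a nonzero sp_digest_bitlen.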