diff --git a/include/sys/crypto/common.h b/include/sys/crypto/common.h index 261e88eceeea..743805650057 100644 --- a/include/sys/crypto/common.h +++ b/include/sys/crypto/common.h @@ -1,187 +1,170 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. */ /* * Copyright 2013 Saso Kiselkov. All rights reserved. */ #ifndef _SYS_CRYPTO_COMMON_H #define _SYS_CRYPTO_COMMON_H /* * Header file for the common data structures of the cryptographic framework */ #ifdef __cplusplus extern "C" { #endif #include <sys/zfs_context.h> /* Cryptographic Mechanisms */ #define CRYPTO_MAX_MECH_NAME 32 typedef char crypto_mech_name_t[CRYPTO_MAX_MECH_NAME]; typedef uint64_t crypto_mech_type_t; typedef struct crypto_mechanism { crypto_mech_type_t cm_type; /* mechanism type */ caddr_t cm_param; /* mech. parameter */ size_t cm_param_len; /* mech. parameter len */ } crypto_mechanism_t; -/* CK_AES_CTR_PARAMS provides parameters to the CKM_AES_CTR mechanism */ -typedef struct CK_AES_CTR_PARAMS { - ulong_t ulCounterBits; - uint8_t cb[16]; -} CK_AES_CTR_PARAMS; - /* CK_AES_CCM_PARAMS provides parameters to the CKM_AES_CCM mechanism */ typedef struct CK_AES_CCM_PARAMS { ulong_t ulMACSize; ulong_t ulNonceSize; ulong_t ulAuthDataSize; ulong_t ulDataSize; /* used for plaintext or ciphertext */ uchar_t *nonce; uchar_t *authData; } CK_AES_CCM_PARAMS; /* CK_AES_GCM_PARAMS provides parameters to the CKM_AES_GCM mechanism */ typedef struct CK_AES_GCM_PARAMS { uchar_t *pIv; ulong_t ulIvLen; ulong_t ulIvBits; uchar_t *pAAD; ulong_t ulAADLen; ulong_t ulTagBits; } CK_AES_GCM_PARAMS; -/* CK_AES_GMAC_PARAMS provides parameters to the CKM_AES_GMAC mechanism */ -typedef struct CK_AES_GMAC_PARAMS { - uchar_t *pIv; - uchar_t *pAAD; - ulong_t ulAADLen; -} CK_AES_GMAC_PARAMS; - /* * The measurement unit bit flag for a mechanism's minimum or maximum key size. * The unit is mechanism dependent. It can be in bits or in bytes.
*/ typedef uint32_t crypto_keysize_unit_t; /* Mechanisms supported out-of-the-box */ #define SUN_CKM_SHA256 "CKM_SHA256" #define SUN_CKM_SHA256_HMAC "CKM_SHA256_HMAC" #define SUN_CKM_SHA256_HMAC_GENERAL "CKM_SHA256_HMAC_GENERAL" #define SUN_CKM_SHA384 "CKM_SHA384" #define SUN_CKM_SHA384_HMAC "CKM_SHA384_HMAC" #define SUN_CKM_SHA384_HMAC_GENERAL "CKM_SHA384_HMAC_GENERAL" #define SUN_CKM_SHA512 "CKM_SHA512" #define SUN_CKM_SHA512_HMAC "CKM_SHA512_HMAC" #define SUN_CKM_SHA512_HMAC_GENERAL "CKM_SHA512_HMAC_GENERAL" #define SUN_CKM_SHA512_224 "CKM_SHA512_224" #define SUN_CKM_SHA512_256 "CKM_SHA512_256" -#define SUN_CKM_AES_CBC "CKM_AES_CBC" -#define SUN_CKM_AES_ECB "CKM_AES_ECB" -#define SUN_CKM_AES_CTR "CKM_AES_CTR" #define SUN_CKM_AES_CCM "CKM_AES_CCM" #define SUN_CKM_AES_GCM "CKM_AES_GCM" -#define SUN_CKM_AES_GMAC "CKM_AES_GMAC" /* Data arguments of cryptographic operations */ typedef enum crypto_data_format { CRYPTO_DATA_RAW = 1, CRYPTO_DATA_UIO, } crypto_data_format_t; typedef struct crypto_data { crypto_data_format_t cd_format; /* Format identifier */ off_t cd_offset; /* Offset from the beginning */ size_t cd_length; /* # of bytes in use */ union { /* Raw format */ iovec_t cd_raw; /* Pointer and length */ /* uio scatter-gather format */ zfs_uio_t *cd_uio; }; /* Crypto Data Union */ } crypto_data_t; /* The keys, and their contents */ typedef struct { uint_t ck_length; /* # of bits in ck_data */ void *ck_data; /* ptr to key value */ } crypto_key_t; /* * Raw key lengths are expressed in number of bits. * The following macro returns the minimum number of * bytes that can contain the specified number of bits. * Round up without overflowing the integer type. */ #define CRYPTO_BITS2BYTES(n) ((n) == 0 ? 0 : (((n) - 1) >> 3) + 1) #define CRYPTO_BYTES2BITS(n) ((n) << 3) /* Providers */ typedef uint32_t crypto_provider_id_t; #define KCF_PROVID_INVALID ((uint32_t)-1) /* session data structure opaque to the consumer */ typedef void *crypto_session_t; #define PROVIDER_OWNS_KEY_SCHEDULE 0x00000001 /* * Common cryptographic status and error codes. 
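Aside: the key-length conversion macros retained above round up and are deliberately phrased to avoid integer overflow. A stand-alone sketch (macro bodies copied from the header) demonstrating both properties:

#include <assert.h>

#define CRYPTO_BITS2BYTES(n) ((n) == 0 ? 0 : (((n) - 1) >> 3) + 1)
#define CRYPTO_BYTES2BITS(n) ((n) << 3)

int
main(void)
{
	/* 0 bits fit in 0 bytes; 1..8 bits need 1 byte; 9 bits need 2. */
	assert(CRYPTO_BITS2BYTES(0) == 0);
	assert(CRYPTO_BITS2BYTES(1) == 1);
	assert(CRYPTO_BITS2BYTES(8) == 1);
	assert(CRYPTO_BITS2BYTES(9) == 2);
	/*
	 * Subtracting first, ((n - 1) >> 3) + 1, cannot overflow for any
	 * nonzero n, unlike the naive ((n + 7) >> 3) near the type's max.
	 */
	assert(CRYPTO_BYTES2BITS(2) == 16);
	return (0);
}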
*/ #define CRYPTO_SUCCESS 0x00000000 #define CRYPTO_HOST_MEMORY 0x00000002 #define CRYPTO_FAILED 0x00000004 #define CRYPTO_ARGUMENTS_BAD 0x00000005 #define CRYPTO_DATA_LEN_RANGE 0x0000000C #define CRYPTO_ENCRYPTED_DATA_LEN_RANGE 0x00000011 #define CRYPTO_KEY_SIZE_RANGE 0x00000013 #define CRYPTO_KEY_TYPE_INCONSISTENT 0x00000014 #define CRYPTO_MECHANISM_INVALID 0x0000001C #define CRYPTO_MECHANISM_PARAM_INVALID 0x0000001D #define CRYPTO_SIGNATURE_INVALID 0x0000002D #define CRYPTO_BUFFER_TOO_SMALL 0x00000042 #define CRYPTO_NOT_SUPPORTED 0x00000044 #define CRYPTO_INVALID_CONTEXT 0x00000047 #define CRYPTO_INVALID_MAC 0x00000048 #define CRYPTO_MECH_NOT_SUPPORTED 0x00000049 #define CRYPTO_INVALID_PROVIDER_ID 0x0000004C #define CRYPTO_BUSY 0x0000004E #define CRYPTO_UNKNOWN_PROVIDER 0x0000004F #ifdef __cplusplus } #endif #endif /* _SYS_CRYPTO_COMMON_H */ diff --git a/lib/libicp/Makefile.am b/lib/libicp/Makefile.am index 4ba55b2158bc..f40512bec9c7 100644 --- a/lib/libicp/Makefile.am +++ b/lib/libicp/Makefile.am @@ -1,84 +1,81 @@ libicp_la_CCASFLAGS = $(AM_CCASFLAGS) libicp_la_CFLAGS = $(AM_CFLAGS) $(KERNEL_CFLAGS) $(LIBRARY_CFLAGS) noinst_LTLIBRARIES += libicp.la nodist_libicp_la_SOURCES = \ module/icp/spi/kcf_spi.c \ module/icp/api/kcf_ctxops.c \ module/icp/api/kcf_cipher.c \ module/icp/api/kcf_mac.c \ module/icp/algs/aes/aes_impl_aesni.c \ module/icp/algs/aes/aes_impl_generic.c \ module/icp/algs/aes/aes_impl_x86-64.c \ module/icp/algs/aes/aes_impl.c \ module/icp/algs/aes/aes_modes.c \ module/icp/algs/blake3/blake3.c \ module/icp/algs/blake3/blake3_generic.c \ module/icp/algs/blake3/blake3_impl.c \ module/icp/algs/edonr/edonr.c \ module/icp/algs/modes/modes.c \ - module/icp/algs/modes/cbc.c \ module/icp/algs/modes/gcm_generic.c \ module/icp/algs/modes/gcm_pclmulqdq.c \ module/icp/algs/modes/gcm.c \ - module/icp/algs/modes/ctr.c \ module/icp/algs/modes/ccm.c \ - module/icp/algs/modes/ecb.c \ module/icp/algs/sha2/sha2_generic.c \ module/icp/algs/sha2/sha256_impl.c \ module/icp/algs/sha2/sha512_impl.c \ module/icp/algs/skein/skein.c \ module/icp/algs/skein/skein_block.c \ module/icp/algs/skein/skein_iv.c \ module/icp/illumos-crypto.c \ module/icp/io/aes.c \ module/icp/io/sha2_mod.c \ module/icp/io/skein_mod.c \ module/icp/core/kcf_sched.c \ module/icp/core/kcf_prov_lib.c \ module/icp/core/kcf_callprov.c \ module/icp/core/kcf_mech_tabs.c \ module/icp/core/kcf_prov_tabs.c \ module/zfs/zfs_impl.c if TARGET_CPU_AARCH64 nodist_libicp_la_SOURCES += \ module/icp/asm-aarch64/blake3/b3_aarch64_sse2.S \ module/icp/asm-aarch64/blake3/b3_aarch64_sse41.S \ module/icp/asm-aarch64/sha2/sha256-armv8.S \ module/icp/asm-aarch64/sha2/sha512-armv8.S endif if TARGET_CPU_ARM nodist_libicp_la_SOURCES += \ module/icp/asm-arm/sha2/sha256-armv7.S \ module/icp/asm-arm/sha2/sha512-armv7.S endif if TARGET_CPU_POWERPC nodist_libicp_la_SOURCES += \ module/icp/asm-ppc64/blake3/b3_ppc64le_sse2.S \ module/icp/asm-ppc64/blake3/b3_ppc64le_sse41.S \ module/icp/asm-ppc64/sha2/sha256-ppc.S \ module/icp/asm-ppc64/sha2/sha512-ppc.S \ module/icp/asm-ppc64/sha2/sha256-p8.S \ module/icp/asm-ppc64/sha2/sha512-p8.S endif if TARGET_CPU_X86_64 nodist_libicp_la_SOURCES += \ module/icp/asm-x86_64/aes/aeskey.c \ module/icp/asm-x86_64/aes/aes_amd64.S \ module/icp/asm-x86_64/aes/aes_aesni.S \ module/icp/asm-x86_64/modes/gcm_pclmulqdq.S \ module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S \ module/icp/asm-x86_64/modes/ghash-x86_64.S \ module/icp/asm-x86_64/sha2/sha256-x86_64.S \ module/icp/asm-x86_64/sha2/sha512-x86_64.S \ 
module/icp/asm-x86_64/blake3/blake3_avx2.S \ module/icp/asm-x86_64/blake3/blake3_avx512.S \ module/icp/asm-x86_64/blake3/blake3_sse2.S \ module/icp/asm-x86_64/blake3/blake3_sse41.S endif diff --git a/module/Kbuild.in b/module/Kbuild.in index 7e08374fa2b9..6e2eab22588b 100644 --- a/module/Kbuild.in +++ b/module/Kbuild.in @@ -1,505 +1,502 @@ # When integrated in to a monolithic kernel the spl module must appear # first. This ensures its module initialization function is run before # any of the other module initialization functions which depend on it. ZFS_MODULE_CFLAGS += -std=gnu99 -Wno-declaration-after-statement ZFS_MODULE_CFLAGS += -Wmissing-prototypes ZFS_MODULE_CFLAGS += @KERNEL_DEBUG_CFLAGS@ @NO_FORMAT_ZERO_LENGTH@ ifneq ($(KBUILD_EXTMOD),) zfs_include = @abs_top_srcdir@/include icp_include = @abs_srcdir@/icp/include zstd_include = @abs_srcdir@/zstd/include ZFS_MODULE_CFLAGS += -include @abs_top_builddir@/zfs_config.h ZFS_MODULE_CFLAGS += -I@abs_top_builddir@/include src = @abs_srcdir@ obj = @abs_builddir@ else zfs_include = $(srctree)/include/zfs icp_include = $(srctree)/$(src)/icp/include zstd_include = $(srctree)/$(src)/zstd/include ZFS_MODULE_CFLAGS += -include $(zfs_include)/zfs_config.h endif ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/kernel ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/spl ZFS_MODULE_CFLAGS += -I$(zfs_include)/os/linux/zfs ZFS_MODULE_CFLAGS += -I$(zfs_include) ZFS_MODULE_CPPFLAGS += -D_KERNEL ZFS_MODULE_CPPFLAGS += @KERNEL_DEBUG_CPPFLAGS@ # KASAN enables -Werror=frame-larger-than=1024, which # breaks oh so many parts of our build. ifeq ($(CONFIG_KASAN),y) ZFS_MODULE_CFLAGS += -Wno-error=frame-larger-than= endif # Generated binary search code is particularly bad with this optimization. # Oddly, range_tree.c is not affected when unrolling is not done and dsl_scan.c # is not affected when unrolling is done. 
# Disable it until the following upstream issue is resolved: # https://github.com/llvm/llvm-project/issues/62790 ifeq ($(CONFIG_X86),y) ifeq ($(CONFIG_CC_IS_CLANG),y) CFLAGS_zfs/dsl_scan.o += -mllvm -x86-cmov-converter=false CFLAGS_zfs/metaslab.o += -mllvm -x86-cmov-converter=false CFLAGS_zfs/range_tree.o += -mllvm -x86-cmov-converter=false CFLAGS_zfs/zap_micro.o += -mllvm -x86-cmov-converter=false endif endif ifneq ($(KBUILD_EXTMOD),) @CONFIG_QAT_TRUE@ZFS_MODULE_CFLAGS += -I@QAT_SRC@/include @CONFIG_QAT_TRUE@KBUILD_EXTRA_SYMBOLS += @QAT_SYMBOLS@ endif asflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS) ccflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS) ifeq ($(CONFIG_ARM64),y) CFLAGS_REMOVE_zcommon/zfs_fletcher_aarch64_neon.o += -mgeneral-regs-only CFLAGS_REMOVE_zfs/vdev_raidz_math_aarch64_neon.o += -mgeneral-regs-only CFLAGS_REMOVE_zfs/vdev_raidz_math_aarch64_neonx2.o += -mgeneral-regs-only endif # Suppress unused-value warnings in sparc64 architecture headers ccflags-$(CONFIG_SPARC64) += -Wno-unused-value obj-$(CONFIG_ZFS) := spl.o zfs.o SPL_OBJS := \ spl-atomic.o \ spl-condvar.o \ spl-cred.o \ spl-err.o \ spl-generic.o \ spl-kmem-cache.o \ spl-kmem.o \ spl-kstat.o \ spl-proc.o \ spl-procfs-list.o \ spl-shrinker.o \ spl-taskq.o \ spl-thread.o \ spl-trace.o \ spl-tsd.o \ spl-vmem.o \ spl-xdr.o \ spl-zlib.o \ spl-zone.o spl-objs += $(addprefix os/linux/spl/,$(SPL_OBJS)) zfs-objs += avl/avl.o ICP_OBJS := \ algs/aes/aes_impl.o \ algs/aes/aes_impl_generic.o \ algs/aes/aes_modes.o \ algs/blake3/blake3.o \ algs/blake3/blake3_generic.o \ algs/blake3/blake3_impl.o \ algs/edonr/edonr.o \ - algs/modes/cbc.o \ algs/modes/ccm.o \ - algs/modes/ctr.o \ - algs/modes/ecb.o \ algs/modes/gcm.o \ algs/modes/gcm_generic.o \ algs/modes/modes.o \ algs/sha2/sha2_generic.o \ algs/sha2/sha256_impl.o \ algs/sha2/sha512_impl.o \ algs/skein/skein.o \ algs/skein/skein_block.o \ algs/skein/skein_iv.o \ api/kcf_cipher.o \ api/kcf_ctxops.o \ api/kcf_mac.o \ core/kcf_callprov.o \ core/kcf_mech_tabs.o \ core/kcf_prov_lib.o \ core/kcf_prov_tabs.o \ core/kcf_sched.o \ illumos-crypto.o \ io/aes.o \ io/sha2_mod.o \ io/skein_mod.o \ spi/kcf_spi.o ICP_OBJS_X86_64 := \ asm-x86_64/aes/aes_aesni.o \ asm-x86_64/aes/aes_amd64.o \ asm-x86_64/aes/aeskey.o \ asm-x86_64/blake3/blake3_avx2.o \ asm-x86_64/blake3/blake3_avx512.o \ asm-x86_64/blake3/blake3_sse2.o \ asm-x86_64/blake3/blake3_sse41.o \ asm-x86_64/sha2/sha256-x86_64.o \ asm-x86_64/sha2/sha512-x86_64.o \ asm-x86_64/modes/aesni-gcm-x86_64.o \ asm-x86_64/modes/gcm_pclmulqdq.o \ asm-x86_64/modes/ghash-x86_64.o ICP_OBJS_X86 := \ algs/aes/aes_impl_aesni.o \ algs/aes/aes_impl_x86-64.o \ algs/modes/gcm_pclmulqdq.o ICP_OBJS_ARM := \ asm-arm/sha2/sha256-armv7.o \ asm-arm/sha2/sha512-armv7.o ICP_OBJS_ARM64 := \ asm-aarch64/blake3/b3_aarch64_sse2.o \ asm-aarch64/blake3/b3_aarch64_sse41.o \ asm-aarch64/sha2/sha256-armv8.o \ asm-aarch64/sha2/sha512-armv8.o ICP_OBJS_PPC_PPC64 := \ asm-ppc64/blake3/b3_ppc64le_sse2.o \ asm-ppc64/blake3/b3_ppc64le_sse41.o \ asm-ppc64/sha2/sha256-p8.o \ asm-ppc64/sha2/sha512-p8.o \ asm-ppc64/sha2/sha256-ppc.o \ asm-ppc64/sha2/sha512-ppc.o zfs-objs += $(addprefix icp/,$(ICP_OBJS)) zfs-$(CONFIG_X86) += $(addprefix icp/,$(ICP_OBJS_X86)) zfs-$(CONFIG_UML_X86)+= $(addprefix icp/,$(ICP_OBJS_X86)) zfs-$(CONFIG_X86_64) += $(addprefix icp/,$(ICP_OBJS_X86_64)) zfs-$(CONFIG_ARM) += $(addprefix icp/,$(ICP_OBJS_ARM)) zfs-$(CONFIG_ARM64) += $(addprefix icp/,$(ICP_OBJS_ARM64)) zfs-$(CONFIG_PPC) += $(addprefix icp/,$(ICP_OBJS_PPC_PPC64)) zfs-$(CONFIG_PPC64) += 
$(addprefix icp/,$(ICP_OBJS_PPC_PPC64)) $(addprefix $(obj)/icp/,$(ICP_OBJS) $(ICP_OBJS_X86) $(ICP_OBJS_X86_64) \ $(ICP_OBJS_ARM64) $(ICP_OBJS_PPC_PPC64)) : asflags-y += -I$(icp_include) -I$(zfs_include)/os/linux/spl -I$(zfs_include) $(addprefix $(obj)/icp/,$(ICP_OBJS) $(ICP_OBJS_X86) $(ICP_OBJS_X86_64) \ $(ICP_OBJS_ARM64) $(ICP_OBJS_PPC_PPC64)) : ccflags-y += -I$(icp_include) -I$(zfs_include)/os/linux/spl -I$(zfs_include) # Suppress objtool "return with modified stack frame" warnings. OBJECT_FILES_NON_STANDARD_aesni-gcm-x86_64.o := y # Suppress objtool "unsupported stack pointer realignment" warnings. # See #6950 for the reasoning. OBJECT_FILES_NON_STANDARD_sha256-x86_64.o := y OBJECT_FILES_NON_STANDARD_sha512-x86_64.o := y LUA_OBJS := \ lapi.o \ lauxlib.o \ lbaselib.o \ lcode.o \ lcompat.o \ lcorolib.o \ lctype.o \ ldebug.o \ ldo.o \ lfunc.o \ lgc.o \ llex.o \ lmem.o \ lobject.o \ lopcodes.o \ lparser.o \ lstate.o \ lstring.o \ lstrlib.o \ ltable.o \ ltablib.o \ ltm.o \ lvm.o \ lzio.o \ setjmp/setjmp.o zfs-objs += $(addprefix lua/,$(LUA_OBJS)) NVPAIR_OBJS := \ fnvpair.o \ nvpair.o \ nvpair_alloc_fixed.o \ nvpair_alloc_spl.o zfs-objs += $(addprefix nvpair/,$(NVPAIR_OBJS)) UNICODE_OBJS := \ u8_textprep.o \ uconv.o zfs-objs += $(addprefix unicode/,$(UNICODE_OBJS)) ZCOMMON_OBJS := \ cityhash.o \ zfeature_common.o \ zfs_comutil.o \ zfs_deleg.o \ zfs_fletcher.o \ zfs_fletcher_superscalar.o \ zfs_fletcher_superscalar4.o \ zfs_namecheck.o \ zfs_prop.o \ zpool_prop.o \ zprop_common.o ZCOMMON_OBJS_X86 := \ zfs_fletcher_avx512.o \ zfs_fletcher_intel.o \ zfs_fletcher_sse.o ZCOMMON_OBJS_ARM64 := \ zfs_fletcher_aarch64_neon.o zfs-objs += $(addprefix zcommon/,$(ZCOMMON_OBJS)) zfs-$(CONFIG_X86) += $(addprefix zcommon/,$(ZCOMMON_OBJS_X86)) zfs-$(CONFIG_UML_X86)+= $(addprefix zcommon/,$(ZCOMMON_OBJS_X86)) zfs-$(CONFIG_ARM64) += $(addprefix zcommon/,$(ZCOMMON_OBJS_ARM64)) # Zstd uses -O3 by default, so we should follow ZFS_ZSTD_FLAGS := -O3 # -fno-tree-vectorize gets set for gcc in zstd/common/compiler.h # Set it for other compilers, too. 
ZFS_ZSTD_FLAGS += -fno-tree-vectorize # SSE register return with SSE disabled if -march=znverX is passed ZFS_ZSTD_FLAGS += -U__BMI__ # Quiet warnings about frame size due to unused code in unmodified zstd lib ZFS_ZSTD_FLAGS += -Wframe-larger-than=20480 ZSTD_OBJS := \ zfs_zstd.o \ zstd_sparc.o ZSTD_UPSTREAM_OBJS := \ lib/common/entropy_common.o \ lib/common/error_private.o \ lib/common/fse_decompress.o \ lib/common/pool.o \ lib/common/zstd_common.o \ lib/compress/fse_compress.o \ lib/compress/hist.o \ lib/compress/huf_compress.o \ lib/compress/zstd_compress.o \ lib/compress/zstd_compress_literals.o \ lib/compress/zstd_compress_sequences.o \ lib/compress/zstd_compress_superblock.o \ lib/compress/zstd_double_fast.o \ lib/compress/zstd_fast.o \ lib/compress/zstd_lazy.o \ lib/compress/zstd_ldm.o \ lib/compress/zstd_opt.o \ lib/decompress/huf_decompress.o \ lib/decompress/zstd_ddict.o \ lib/decompress/zstd_decompress.o \ lib/decompress/zstd_decompress_block.o zfs-objs += $(addprefix zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS)) # Disable aarch64 neon SIMD instructions for kernel mode $(addprefix $(obj)/zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS)) : ccflags-y += -I$(zstd_include) $(ZFS_ZSTD_FLAGS) $(addprefix $(obj)/zstd/,$(ZSTD_OBJS) $(ZSTD_UPSTREAM_OBJS)) : asflags-y += -I$(zstd_include) $(addprefix $(obj)/zstd/,$(ZSTD_UPSTREAM_OBJS)) : ccflags-y += -include $(zstd_include)/aarch64_compat.h -include $(zstd_include)/zstd_compat_wrapper.h -Wp,-w $(obj)/zstd/zfs_zstd.o : ccflags-y += -include $(zstd_include)/zstd_compat_wrapper.h ZFS_OBJS := \ abd.o \ aggsum.o \ arc.o \ blake3_zfs.o \ blkptr.o \ bplist.o \ bpobj.o \ bptree.o \ bqueue.o \ brt.o \ btree.o \ dataset_kstats.o \ dbuf.o \ dbuf_stats.o \ ddt.o \ ddt_stats.o \ ddt_zap.o \ dmu.o \ dmu_diff.o \ dmu_object.o \ dmu_objset.o \ dmu_recv.o \ dmu_redact.o \ dmu_send.o \ dmu_traverse.o \ dmu_tx.o \ dmu_zfetch.o \ dnode.o \ dnode_sync.o \ dsl_bookmark.o \ dsl_crypt.o \ dsl_dataset.o \ dsl_deadlist.o \ dsl_deleg.o \ dsl_destroy.o \ dsl_dir.o \ dsl_pool.o \ dsl_prop.o \ dsl_scan.o \ dsl_synctask.o \ dsl_userhold.o \ edonr_zfs.o \ fm.o \ gzip.o \ hkdf.o \ lz4.o \ lz4_zfs.o \ lzjb.o \ metaslab.o \ mmp.o \ multilist.o \ objlist.o \ pathname.o \ range_tree.o \ refcount.o \ rrwlock.o \ sa.o \ sha2_zfs.o \ skein_zfs.o \ spa.o \ spa_checkpoint.o \ spa_config.o \ spa_errlog.o \ spa_history.o \ spa_log_spacemap.o \ spa_misc.o \ spa_stats.o \ space_map.o \ space_reftree.o \ txg.o \ uberblock.o \ unique.o \ vdev.o \ vdev_draid.o \ vdev_draid_rand.o \ vdev_indirect.o \ vdev_indirect_births.o \ vdev_indirect_mapping.o \ vdev_initialize.o \ vdev_label.o \ vdev_mirror.o \ vdev_missing.o \ vdev_queue.o \ vdev_raidz.o \ vdev_raidz_math.o \ vdev_raidz_math_scalar.o \ vdev_rebuild.o \ vdev_removal.o \ vdev_root.o \ vdev_trim.o \ zap.o \ zap_leaf.o \ zap_micro.o \ zcp.o \ zcp_get.o \ zcp_global.o \ zcp_iter.o \ zcp_set.o \ zcp_synctask.o \ zfeature.o \ zfs_byteswap.o \ zfs_chksum.o \ zfs_fm.o \ zfs_fuid.o \ zfs_impl.o \ zfs_ioctl.o \ zfs_log.o \ zfs_onexit.o \ zfs_quota.o \ zfs_ratelimit.o \ zfs_replay.o \ zfs_rlock.o \ zfs_sa.o \ zfs_vnops.o \ zil.o \ zio.o \ zio_checksum.o \ zio_compress.o \ zio_inject.o \ zle.o \ zrlock.o \ zthr.o \ zvol.o ZFS_OBJS_OS := \ abd_os.o \ arc_os.o \ mmp_os.o \ policy.o \ qat.o \ qat_compress.o \ qat_crypt.o \ spa_misc_os.o \ trace.o \ vdev_disk.o \ vdev_file.o \ vdev_label_os.o \ zfs_acl.o \ zfs_ctldir.o \ zfs_debug.o \ zfs_dir.o \ zfs_file_os.o \ zfs_ioctl_os.o \ zfs_racct.o \ zfs_sysfs.o \ zfs_uio.o \ zfs_vfsops.o \ zfs_vnops_os.o \ 
zfs_znode.o \ zio_crypt.o \ zpl_ctldir.o \ zpl_export.o \ zpl_file.o \ zpl_file_range.o \ zpl_inode.o \ zpl_super.o \ zpl_xattr.o \ zvol_os.o ZFS_OBJS_X86 := \ vdev_raidz_math_avx2.o \ vdev_raidz_math_avx512bw.o \ vdev_raidz_math_avx512f.o \ vdev_raidz_math_sse2.o \ vdev_raidz_math_ssse3.o ZFS_OBJS_ARM64 := \ vdev_raidz_math_aarch64_neon.o \ vdev_raidz_math_aarch64_neonx2.o ZFS_OBJS_PPC_PPC64 := \ vdev_raidz_math_powerpc_altivec.o zfs-objs += $(addprefix zfs/,$(ZFS_OBJS)) $(addprefix os/linux/zfs/,$(ZFS_OBJS_OS)) zfs-$(CONFIG_X86) += $(addprefix zfs/,$(ZFS_OBJS_X86)) zfs-$(CONFIG_UML_X86)+= $(addprefix zfs/,$(ZFS_OBJS_X86)) zfs-$(CONFIG_ARM64) += $(addprefix zfs/,$(ZFS_OBJS_ARM64)) zfs-$(CONFIG_PPC) += $(addprefix zfs/,$(ZFS_OBJS_PPC_PPC64)) zfs-$(CONFIG_PPC64) += $(addprefix zfs/,$(ZFS_OBJS_PPC_PPC64)) UBSAN_SANITIZE_zap_leaf.o := n UBSAN_SANITIZE_zap_micro.o := n UBSAN_SANITIZE_sa.o := n # Suppress incorrect warnings from versions of objtool which are not # aware of x86 EVEX prefix instructions used for AVX512. OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512bw.o := y OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512f.o := y ifeq ($(CONFIG_ALTIVEC),y) $(obj)/zfs/vdev_raidz_math_powerpc_altivec.o : c_flags += -maltivec endif diff --git a/module/icp/algs/aes/aes_modes.c b/module/icp/algs/aes/aes_modes.c index 6a25496d050e..631e92f3542e 100644 --- a/module/icp/algs/aes/aes_modes.c +++ b/module/icp/algs/aes/aes_modes.c @@ -1,135 +1,113 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2009 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ #include <sys/zfs_context.h> #include <modes/modes.h> #include <aes/aes_impl.h> /* Copy a 16-byte AES block from "in" to "out" */ void aes_copy_block(uint8_t *in, uint8_t *out) { if (IS_P2ALIGNED2(in, out, sizeof (uint32_t))) { /* LINTED: pointer alignment */ *(uint32_t *)&out[0] = *(uint32_t *)&in[0]; /* LINTED: pointer alignment */ *(uint32_t *)&out[4] = *(uint32_t *)&in[4]; /* LINTED: pointer alignment */ *(uint32_t *)&out[8] = *(uint32_t *)&in[8]; /* LINTED: pointer alignment */ *(uint32_t *)&out[12] = *(uint32_t *)&in[12]; } else { AES_COPY_BLOCK(in, out); } } /* XOR a 16-byte AES block of data into dst */ void aes_xor_block(uint8_t *data, uint8_t *dst) { if (IS_P2ALIGNED2(dst, data, sizeof (uint32_t))) { /* LINTED: pointer alignment */ *(uint32_t *)&dst[0] ^= *(uint32_t *)&data[0]; /* LINTED: pointer alignment */ *(uint32_t *)&dst[4] ^= *(uint32_t *)&data[4]; /* LINTED: pointer alignment */ *(uint32_t *)&dst[8] ^= *(uint32_t *)&data[8]; /* LINTED: pointer alignment */ *(uint32_t *)&dst[12] ^= *(uint32_t *)&data[12]; } else { AES_XOR_BLOCK(data, dst); } } /* * Encrypt multiple blocks of data according to mode.
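The word-at-a-time fast paths above are gated on IS_P2ALIGNED2(). A plausible model of that check (hypothetical macro, not the actual sys/sysmacros.h definition) is that both pointers must be aligned simultaneously; a single OR of the two addresses exposes any stray low bits:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical equivalent of IS_P2ALIGNED2(v, w, align): both addresses
 * must have zero low bits modulo the (power-of-two) alignment. */
#define ALIGNED2(v, w, a) \
	((((uintptr_t)(v) | (uintptr_t)(w)) & ((uintptr_t)(a) - 1)) == 0)

int
main(void)
{
	uint32_t words[4];
	uint8_t *p = (uint8_t *)words;

	/* Offsets 0 and 4 share 4-byte alignment: word-copy fast path. */
	printf("%d\n", ALIGNED2(p, p + 4, sizeof (uint32_t)));	/* 1 */
	/* Offsets 0 and 3 cannot both be aligned: byte-wise fallback. */
	printf("%d\n", ALIGNED2(p, p + 3, sizeof (uint32_t)));	/* 0 */
	return (0);
}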
*/ int aes_encrypt_contiguous_blocks(void *ctx, char *data, size_t length, crypto_data_t *out) { aes_ctx_t *aes_ctx = ctx; int rv; - if (aes_ctx->ac_flags & CTR_MODE) { - rv = ctr_mode_contiguous_blocks(ctx, data, length, out, - AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block); - } else if (aes_ctx->ac_flags & CCM_MODE) { + if (aes_ctx->ac_flags & CCM_MODE) { rv = ccm_mode_encrypt_contiguous_blocks(ctx, data, length, out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, aes_xor_block); - } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) { + } else if (aes_ctx->ac_flags & GCM_MODE) { rv = gcm_mode_encrypt_contiguous_blocks(ctx, data, length, out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, aes_xor_block); - } else if (aes_ctx->ac_flags & CBC_MODE) { - rv = cbc_encrypt_contiguous_blocks(ctx, - data, length, out, AES_BLOCK_LEN, aes_encrypt_block, - aes_copy_block, aes_xor_block); - } else { - rv = ecb_cipher_contiguous_blocks(ctx, data, length, out, - AES_BLOCK_LEN, aes_encrypt_block); - } + } else + __builtin_unreachable(); return (rv); } /* * Decrypt multiple blocks of data according to mode. */ int aes_decrypt_contiguous_blocks(void *ctx, char *data, size_t length, crypto_data_t *out) { aes_ctx_t *aes_ctx = ctx; int rv; - if (aes_ctx->ac_flags & CTR_MODE) { - rv = ctr_mode_contiguous_blocks(ctx, data, length, out, - AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block); - if (rv == CRYPTO_DATA_LEN_RANGE) - rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE; - } else if (aes_ctx->ac_flags & CCM_MODE) { + if (aes_ctx->ac_flags & CCM_MODE) { rv = ccm_mode_decrypt_contiguous_blocks(ctx, data, length, out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, aes_xor_block); - } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) { + } else if (aes_ctx->ac_flags & GCM_MODE) { rv = gcm_mode_decrypt_contiguous_blocks(ctx, data, length, out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, aes_xor_block); - } else if (aes_ctx->ac_flags & CBC_MODE) { - rv = cbc_decrypt_contiguous_blocks(ctx, data, length, out, - AES_BLOCK_LEN, aes_decrypt_block, aes_copy_block, - aes_xor_block); - } else { - rv = ecb_cipher_contiguous_blocks(ctx, data, length, out, - AES_BLOCK_LEN, aes_decrypt_block); - if (rv == CRYPTO_DATA_LEN_RANGE) - rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE; - } + } else + __builtin_unreachable(); return (rv); } diff --git a/module/icp/algs/modes/cbc.c b/module/icp/algs/modes/cbc.c deleted file mode 100644 index d0219fb24c49..000000000000 --- a/module/icp/algs/modes/cbc.c +++ /dev/null @@ -1,264 +0,0 @@ -/* - * CDDL HEADER START - * - * The contents of this file are subject to the terms of the - * Common Development and Distribution License (the "License"). - * You may not use this file except in compliance with the License. - * - * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE - * or https://opensource.org/licenses/CDDL-1.0. - * See the License for the specific language governing permissions - * and limitations under the License. - * - * When distributing Covered Code, include this CDDL HEADER in each - * file and include the License file at usr/src/OPENSOLARIS.LICENSE. - * If applicable, add the following below this CDDL HEADER, with the - * fields enclosed by brackets "[]" replaced with your own identifying - * information: Portions Copyright [yyyy] [name of copyright owner] - * - * CDDL HEADER END - */ -/* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved. - * Use is subject to license terms.
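A note on the new __builtin_unreachable() arms above: the builtin (GCC/Clang) tells the optimizer control can never reach that point, so no defensive else path is emitted; actually reaching it is undefined behavior. The dispatch therefore leans on the invariant that every aes_ctx_t is created with CCM_MODE or GCM_MODE set. A minimal illustration of the contract, with a hypothetical helper:

#include <stdio.h>

/*
 * The caller guarantees x is 1 or 2; the compiler may assume so and
 * drop any other case entirely (reaching the hint is UB).
 */
static int
pick(int x)
{
	if (x == 1)
		return (10);
	else if (x == 2)
		return (20);
	else
		__builtin_unreachable();	/* UB if the promise is broken */
}

int
main(void)
{
	printf("%d %d\n", pick(1), pick(2));	/* 10 20 */
	return (0);
}

A debug build could place an assertion ahead of the hint; the patch instead relies on the remaining context allocators (e.g. gcm_alloc_ctx() below) only ever producing those two modes.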
- */ - -#include <sys/zfs_context.h> -#include <modes/modes.h> -#include <sys/crypto/common.h> -#include <sys/crypto/impl.h> - -/* - * Algorithm independent CBC functions. - */ -int -cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length, - crypto_data_t *out, size_t block_size, - int (*encrypt)(const void *, const uint8_t *, uint8_t *), - void (*copy_block)(uint8_t *, uint8_t *), - void (*xor_block)(uint8_t *, uint8_t *)) -{ - size_t remainder = length; - size_t need = 0; - uint8_t *datap = (uint8_t *)data; - uint8_t *blockp; - uint8_t *lastp; - void *iov_or_mp; - offset_t offset; - uint8_t *out_data_1; - uint8_t *out_data_2; - size_t out_data_1_len; - - if (length + ctx->cbc_remainder_len < block_size) { - /* accumulate bytes here and return */ - memcpy((uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len, - datap, - length); - ctx->cbc_remainder_len += length; - ctx->cbc_copy_to = datap; - return (CRYPTO_SUCCESS); - } - - lastp = (uint8_t *)ctx->cbc_iv; - crypto_init_ptrs(out, &iov_or_mp, &offset); - - do { - /* Unprocessed data from last call. */ - if (ctx->cbc_remainder_len > 0) { - need = block_size - ctx->cbc_remainder_len; - - if (need > remainder) - return (CRYPTO_DATA_LEN_RANGE); - - memcpy(&((uint8_t *)ctx->cbc_remainder) - [ctx->cbc_remainder_len], datap, need); - - blockp = (uint8_t *)ctx->cbc_remainder; - } else { - blockp = datap; - } - - /* - * XOR the previous cipher block or IV with the - * current clear block. - */ - xor_block(blockp, lastp); - encrypt(ctx->cbc_keysched, lastp, lastp); - crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, - &out_data_1_len, &out_data_2, block_size); - - /* copy block to where it belongs */ - if (out_data_1_len == block_size) { - copy_block(lastp, out_data_1); - } else { - memcpy(out_data_1, lastp, out_data_1_len); - if (out_data_2 != NULL) { - memcpy(out_data_2, - lastp + out_data_1_len, - block_size - out_data_1_len); - } - } - /* update offset */ - out->cd_offset += block_size; - - /* Update pointer to next block of data to be processed. */ - if (ctx->cbc_remainder_len != 0) { - datap += need; - ctx->cbc_remainder_len = 0; - } else { - datap += block_size; - } - - remainder = (size_t)&data[length] - (size_t)datap; - - /* Incomplete last block. */ - if (remainder > 0 && remainder < block_size) { - memcpy(ctx->cbc_remainder, datap, remainder); - ctx->cbc_remainder_len = remainder; - ctx->cbc_copy_to = datap; - goto out; - } - ctx->cbc_copy_to = NULL; - - } while (remainder > 0); - -out: - /* - * Save the last encrypted block in the context. - */ - if (ctx->cbc_lastp != NULL) { - copy_block((uint8_t *)ctx->cbc_lastp, (uint8_t *)ctx->cbc_iv); - ctx->cbc_lastp = (uint8_t *)ctx->cbc_iv; - } - - return (CRYPTO_SUCCESS); -} - -#define OTHER(a, ctx) \ - (((a) == (ctx)->cbc_lastblock) ?
(ctx)->cbc_iv : (ctx)->cbc_lastblock) - -int -cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length, - crypto_data_t *out, size_t block_size, - int (*decrypt)(const void *, const uint8_t *, uint8_t *), - void (*copy_block)(uint8_t *, uint8_t *), - void (*xor_block)(uint8_t *, uint8_t *)) -{ - size_t remainder = length; - size_t need = 0; - uint8_t *datap = (uint8_t *)data; - uint8_t *blockp; - uint8_t *lastp; - void *iov_or_mp; - offset_t offset; - uint8_t *out_data_1; - uint8_t *out_data_2; - size_t out_data_1_len; - - if (length + ctx->cbc_remainder_len < block_size) { - /* accumulate bytes here and return */ - memcpy((uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len, - datap, - length); - ctx->cbc_remainder_len += length; - ctx->cbc_copy_to = datap; - return (CRYPTO_SUCCESS); - } - - lastp = ctx->cbc_lastp; - crypto_init_ptrs(out, &iov_or_mp, &offset); - - do { - /* Unprocessed data from last call. */ - if (ctx->cbc_remainder_len > 0) { - need = block_size - ctx->cbc_remainder_len; - - if (need > remainder) - return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE); - - memcpy(&((uint8_t *)ctx->cbc_remainder) - [ctx->cbc_remainder_len], datap, need); - - blockp = (uint8_t *)ctx->cbc_remainder; - } else { - blockp = datap; - } - - /* LINTED: pointer alignment */ - copy_block(blockp, (uint8_t *)OTHER((uint64_t *)lastp, ctx)); - - decrypt(ctx->cbc_keysched, blockp, - (uint8_t *)ctx->cbc_remainder); - blockp = (uint8_t *)ctx->cbc_remainder; - - /* - * XOR the previous cipher block or IV with the - * currently decrypted block. - */ - xor_block(lastp, blockp); - - /* LINTED: pointer alignment */ - lastp = (uint8_t *)OTHER((uint64_t *)lastp, ctx); - - crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, - &out_data_1_len, &out_data_2, block_size); - - memcpy(out_data_1, blockp, out_data_1_len); - if (out_data_2 != NULL) { - memcpy(out_data_2, blockp + out_data_1_len, - block_size - out_data_1_len); - } - - /* update offset */ - out->cd_offset += block_size; - - /* Update pointer to next block of data to be processed. */ - if (ctx->cbc_remainder_len != 0) { - datap += need; - ctx->cbc_remainder_len = 0; - } else { - datap += block_size; - } - - remainder = (size_t)&data[length] - (size_t)datap; - - /* Incomplete last block. */ - if (remainder > 0 && remainder < block_size) { - memcpy(ctx->cbc_remainder, datap, remainder); - ctx->cbc_remainder_len = remainder; - ctx->cbc_lastp = lastp; - ctx->cbc_copy_to = datap; - return (CRYPTO_SUCCESS); - } - ctx->cbc_copy_to = NULL; - - } while (remainder > 0); - - ctx->cbc_lastp = lastp; - return (CRYPTO_SUCCESS); -} - -int -cbc_init_ctx(cbc_ctx_t *cbc_ctx, char *param, size_t param_len, - size_t block_size, void (*copy_block)(uint8_t *, uint64_t *)) -{ - /* Copy IV into context. */ - ASSERT3P(param, !=, NULL); - ASSERT3U(param_len, ==, block_size); - - copy_block((uchar_t *)param, cbc_ctx->cbc_iv); - - return (CRYPTO_SUCCESS); -} - -void * -cbc_alloc_ctx(int kmflag) -{ - cbc_ctx_t *cbc_ctx; - - if ((cbc_ctx = kmem_zalloc(sizeof (cbc_ctx_t), kmflag)) == NULL) - return (NULL); - - cbc_ctx->cbc_flags = CBC_MODE; - return (cbc_ctx); -} diff --git a/module/icp/algs/modes/ctr.c b/module/icp/algs/modes/ctr.c deleted file mode 100644 index db6b1c71d5cd..000000000000 --- a/module/icp/algs/modes/ctr.c +++ /dev/null @@ -1,227 +0,0 @@ -/* - * CDDL HEADER START - * - * The contents of this file are subject to the terms of the - * Common Development and Distribution License (the "License"). - * You may not use this file except in compliance with the License. 
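On the OTHER() macro in the CBC decrypt path above: it ping-pongs between the two block-sized buffers cbc_lastblock and cbc_iv, so the ciphertext of block N is saved before it can be overwritten and is still available as the XOR input for block N+1. That is what makes in-place operation safe, where plaintext output lands on top of the ciphertext input. The pattern in isolation (hypothetical names, detached from the ICP context struct):

#include <stdint.h>

static uint64_t slot_a[2], slot_b[2];	/* two 16-byte saved-ciphertext slots */

/* Equivalent of OTHER(): whichever slot `cur` is not using. */
static uint64_t *
other(uint64_t *cur)
{
	return (cur == slot_a ? slot_b : slot_a);
}

Each iteration copies the incoming ciphertext into other(lastp), decrypts, XORs against lastp, then advances lastp to the freshly saved block.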
- * - * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE - * or https://opensource.org/licenses/CDDL-1.0. - * See the License for the specific language governing permissions - * and limitations under the License. - * - * When distributing Covered Code, include this CDDL HEADER in each - * file and include the License file at usr/src/OPENSOLARIS.LICENSE. - * If applicable, add the following below this CDDL HEADER, with the - * fields enclosed by brackets "[]" replaced with your own identifying - * information: Portions Copyright [yyyy] [name of copyright owner] - * - * CDDL HEADER END - */ -/* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved. - * Use is subject to license terms. - */ - -#include <sys/zfs_context.h> -#include <modes/modes.h> -#include <sys/crypto/common.h> -#include <sys/crypto/impl.h> -#include <sys/byteorder.h> - -/* - * Encrypt and decrypt multiple blocks of data in counter mode. - */ -int -ctr_mode_contiguous_blocks(ctr_ctx_t *ctx, char *data, size_t length, - crypto_data_t *out, size_t block_size, - int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct), - void (*xor_block)(uint8_t *, uint8_t *)) -{ - size_t remainder = length; - size_t need = 0; - uint8_t *datap = (uint8_t *)data; - uint8_t *blockp; - uint8_t *lastp; - void *iov_or_mp; - offset_t offset; - uint8_t *out_data_1; - uint8_t *out_data_2; - size_t out_data_1_len; - uint64_t lower_counter, upper_counter; - - if (length + ctx->ctr_remainder_len < block_size) { - /* accumulate bytes here and return */ - memcpy((uint8_t *)ctx->ctr_remainder + ctx->ctr_remainder_len, - datap, - length); - ctx->ctr_remainder_len += length; - ctx->ctr_copy_to = datap; - return (CRYPTO_SUCCESS); - } - - crypto_init_ptrs(out, &iov_or_mp, &offset); - - do { - /* Unprocessed data from last call. */ - if (ctx->ctr_remainder_len > 0) { - need = block_size - ctx->ctr_remainder_len; - - if (need > remainder) - return (CRYPTO_DATA_LEN_RANGE); - - memcpy(&((uint8_t *)ctx->ctr_remainder) - [ctx->ctr_remainder_len], datap, need); - - blockp = (uint8_t *)ctx->ctr_remainder; - } else { - blockp = datap; - } - - /* ctr_cb is the counter block */ - cipher(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb, - (uint8_t *)ctx->ctr_tmp); - - lastp = (uint8_t *)ctx->ctr_tmp; - - /* - * Increment Counter. - */ - lower_counter = ntohll(ctx->ctr_cb[1] & ctx->ctr_lower_mask); - lower_counter = htonll(lower_counter + 1); - lower_counter &= ctx->ctr_lower_mask; - ctx->ctr_cb[1] = (ctx->ctr_cb[1] & ~(ctx->ctr_lower_mask)) | - lower_counter; - - /* wrap around */ - if (lower_counter == 0) { - upper_counter = - ntohll(ctx->ctr_cb[0] & ctx->ctr_upper_mask); - upper_counter = htonll(upper_counter + 1); - upper_counter &= ctx->ctr_upper_mask; - ctx->ctr_cb[0] = - (ctx->ctr_cb[0] & ~(ctx->ctr_upper_mask)) | - upper_counter; - } - - /* - * XOR encrypted counter block with the current clear block. - */ - xor_block(blockp, lastp); - - crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, - &out_data_1_len, &out_data_2, block_size); - - /* copy block to where it belongs */ - memcpy(out_data_1, lastp, out_data_1_len); - if (out_data_2 != NULL) { - memcpy(out_data_2, lastp + out_data_1_len, - block_size - out_data_1_len); - } - /* update offset */ - out->cd_offset += block_size; - - /* Update pointer to next block of data to be processed. */ - if (ctx->ctr_remainder_len != 0) { - datap += need; - ctx->ctr_remainder_len = 0; - } else { - datap += block_size; - } - - remainder = (size_t)&data[length] - (size_t)datap; - - /* Incomplete last block.
*/ - if (remainder > 0 && remainder < block_size) { - memcpy(ctx->ctr_remainder, datap, remainder); - ctx->ctr_remainder_len = remainder; - ctx->ctr_copy_to = datap; - goto out; - } - ctx->ctr_copy_to = NULL; - - } while (remainder > 0); - -out: - return (CRYPTO_SUCCESS); -} - -int -ctr_mode_final(ctr_ctx_t *ctx, crypto_data_t *out, - int (*encrypt_block)(const void *, const uint8_t *, uint8_t *)) -{ - uint8_t *lastp; - void *iov_or_mp; - offset_t offset; - uint8_t *out_data_1; - uint8_t *out_data_2; - size_t out_data_1_len; - uint8_t *p; - int i; - - if (out->cd_length < ctx->ctr_remainder_len) - return (CRYPTO_DATA_LEN_RANGE); - - encrypt_block(ctx->ctr_keysched, (uint8_t *)ctx->ctr_cb, - (uint8_t *)ctx->ctr_tmp); - - lastp = (uint8_t *)ctx->ctr_tmp; - p = (uint8_t *)ctx->ctr_remainder; - for (i = 0; i < ctx->ctr_remainder_len; i++) { - p[i] ^= lastp[i]; - } - - crypto_init_ptrs(out, &iov_or_mp, &offset); - crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, - &out_data_1_len, &out_data_2, ctx->ctr_remainder_len); - - memcpy(out_data_1, p, out_data_1_len); - if (out_data_2 != NULL) { - memcpy(out_data_2, - (uint8_t *)p + out_data_1_len, - ctx->ctr_remainder_len - out_data_1_len); - } - out->cd_offset += ctx->ctr_remainder_len; - ctx->ctr_remainder_len = 0; - return (CRYPTO_SUCCESS); -} - -int -ctr_init_ctx(ctr_ctx_t *ctr_ctx, ulong_t count, uint8_t *cb, - void (*copy_block)(uint8_t *, uint8_t *)) -{ - uint64_t upper_mask = 0; - uint64_t lower_mask = 0; - - if (count == 0 || count > 128) { - return (CRYPTO_MECHANISM_PARAM_INVALID); - } - /* upper 64 bits of the mask */ - if (count >= 64) { - count -= 64; - upper_mask = (count == 64) ? UINT64_MAX : (1ULL << count) - 1; - lower_mask = UINT64_MAX; - } else { - /* now the lower 63 bits */ - lower_mask = (1ULL << count) - 1; - } - ctr_ctx->ctr_lower_mask = htonll(lower_mask); - ctr_ctx->ctr_upper_mask = htonll(upper_mask); - - copy_block(cb, (uchar_t *)ctr_ctx->ctr_cb); - ctr_ctx->ctr_lastp = (uint8_t *)&ctr_ctx->ctr_cb[0]; - ctr_ctx->ctr_flags |= CTR_MODE; - return (CRYPTO_SUCCESS); -} - -void * -ctr_alloc_ctx(int kmflag) -{ - ctr_ctx_t *ctr_ctx; - - if ((ctr_ctx = kmem_zalloc(sizeof (ctr_ctx_t), kmflag)) == NULL) - return (NULL); - - ctr_ctx->ctr_flags = CTR_MODE; - return (ctr_ctx); -} diff --git a/module/icp/algs/modes/ecb.c b/module/icp/algs/modes/ecb.c deleted file mode 100644 index e2d8e71c161c..000000000000 --- a/module/icp/algs/modes/ecb.c +++ /dev/null @@ -1,127 +0,0 @@ -/* - * CDDL HEADER START - * - * The contents of this file are subject to the terms of the - * Common Development and Distribution License (the "License"). - * You may not use this file except in compliance with the License. - * - * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE - * or https://opensource.org/licenses/CDDL-1.0. - * See the License for the specific language governing permissions - * and limitations under the License. - * - * When distributing Covered Code, include this CDDL HEADER in each - * file and include the License file at usr/src/OPENSOLARIS.LICENSE. - * If applicable, add the following below this CDDL HEADER, with the - * fields enclosed by brackets "[]" replaced with your own identifying - * information: Portions Copyright [yyyy] [name of copyright owner] - * - * CDDL HEADER END - */ -/* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved. - * Use is subject to license terms. - */ - -#include <sys/zfs_context.h> -#include <modes/modes.h> -#include <sys/crypto/common.h> -#include <sys/crypto/impl.h> - -/* - * Algorithm independent ECB functions.
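ctr_init_ctx() above derives the two counter masks from the mechanism's ulCounterBits (count). A small mirror of that derivation in host byte order (the driver then applies htonll() to both masks), runnable stand-alone:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the mask computation in the deleted ctr_init_ctx(). */
static void
ctr_masks(unsigned count, uint64_t *upper, uint64_t *lower)
{
	*upper = 0;
	if (count >= 64) {
		count -= 64;
		*upper = (count == 64) ? UINT64_MAX : (1ULL << count) - 1;
		*lower = UINT64_MAX;
	} else {
		*lower = (1ULL << count) - 1;
	}
}

int
main(void)
{
	uint64_t up, lo;

	ctr_masks(32, &up, &lo);	/* up=0, lo=0x00000000ffffffff: */
					/* only ctr_cb[1]'s low 32 bits count */
	printf("%016llx %016llx\n", (unsigned long long)up,
	    (unsigned long long)lo);
	ctr_masks(128, &up, &lo);	/* both UINT64_MAX: the whole block */
	printf("%016llx %016llx\n", (unsigned long long)up,
	    (unsigned long long)lo);
	return (0);
}

The wrap-around branch in ctr_mode_contiguous_blocks() only fires when the masked lower half overflows to zero, carrying into the upper half's masked bits.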
- */ -int -ecb_cipher_contiguous_blocks(ecb_ctx_t *ctx, char *data, size_t length, - crypto_data_t *out, size_t block_size, - int (*cipher)(const void *ks, const uint8_t *pt, uint8_t *ct)) -{ - size_t remainder = length; - size_t need = 0; - uint8_t *datap = (uint8_t *)data; - uint8_t *blockp; - uint8_t *lastp; - void *iov_or_mp; - offset_t offset; - uint8_t *out_data_1; - uint8_t *out_data_2; - size_t out_data_1_len; - - if (length + ctx->ecb_remainder_len < block_size) { - /* accumulate bytes here and return */ - memcpy((uint8_t *)ctx->ecb_remainder + ctx->ecb_remainder_len, - datap, - length); - ctx->ecb_remainder_len += length; - ctx->ecb_copy_to = datap; - return (CRYPTO_SUCCESS); - } - - lastp = (uint8_t *)ctx->ecb_iv; - crypto_init_ptrs(out, &iov_or_mp, &offset); - - do { - /* Unprocessed data from last call. */ - if (ctx->ecb_remainder_len > 0) { - need = block_size - ctx->ecb_remainder_len; - - if (need > remainder) - return (CRYPTO_DATA_LEN_RANGE); - - memcpy(&((uint8_t *)ctx->ecb_remainder) - [ctx->ecb_remainder_len], datap, need); - - blockp = (uint8_t *)ctx->ecb_remainder; - } else { - blockp = datap; - } - - cipher(ctx->ecb_keysched, blockp, lastp); - crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, - &out_data_1_len, &out_data_2, block_size); - - /* copy block to where it belongs */ - memcpy(out_data_1, lastp, out_data_1_len); - if (out_data_2 != NULL) { - memcpy(out_data_2, lastp + out_data_1_len, - block_size - out_data_1_len); - } - /* update offset */ - out->cd_offset += block_size; - - /* Update pointer to next block of data to be processed. */ - if (ctx->ecb_remainder_len != 0) { - datap += need; - ctx->ecb_remainder_len = 0; - } else { - datap += block_size; - } - - remainder = (size_t)&data[length] - (size_t)datap; - - /* Incomplete last block. */ - if (remainder > 0 && remainder < block_size) { - memcpy(ctx->ecb_remainder, datap, remainder); - ctx->ecb_remainder_len = remainder; - ctx->ecb_copy_to = datap; - goto out; - } - ctx->ecb_copy_to = NULL; - - } while (remainder > 0); - -out: - return (CRYPTO_SUCCESS); -} - -void * -ecb_alloc_ctx(int kmflag) -{ - ecb_ctx_t *ecb_ctx; - - if ((ecb_ctx = kmem_zalloc(sizeof (ecb_ctx_t), kmflag)) == NULL) - return (NULL); - - ecb_ctx->ecb_flags = ECB_MODE; - return (ecb_ctx); -} diff --git a/module/icp/algs/modes/gcm.c b/module/icp/algs/modes/gcm.c index dd8db6f97460..21f4301d584d 100644 --- a/module/icp/algs/modes/gcm.c +++ b/module/icp/algs/modes/gcm.c @@ -1,1574 +1,1521 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 
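For contrast with the chained modes, the deleted ECB path above applies the cipher to each block independently: no IV, no feedback, no state between blocks beyond the partial-block remainder buffer. Its core, reduced to a sketch (hypothetical signature modeled on the ICP's cipher hooks):

#include <stddef.h>
#include <stdint.h>

/* ECB essence: out[i] = E(key, in[i]); blocks do not interact. */
static void
ecb_blocks(const void *ks, const uint8_t *in, uint8_t *out,
    size_t nblocks, size_t bs,
    int (*cipher)(const void *, const uint8_t *, uint8_t *))
{
	for (size_t i = 0; i < nblocks; i++)
		cipher(ks, in + i * bs, out + i * bs);
}

That independence is also why ECB leaks equal-plaintext patterns; the mode was evidently unused by the remaining consumers, which is why the whole file can go.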
*/ #include #include #include #include #include #include #include #include #include #ifdef CAN_USE_GCM_ASM #include #endif #define GHASH(c, d, t, o) \ xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \ (o)->mul((uint64_t *)(void *)(c)->gcm_ghash, (c)->gcm_H, \ (uint64_t *)(void *)(t)); /* Select GCM implementation */ #define IMPL_FASTEST (UINT32_MAX) #define IMPL_CYCLE (UINT32_MAX-1) #ifdef CAN_USE_GCM_ASM #define IMPL_AVX (UINT32_MAX-2) #endif #define GCM_IMPL_READ(i) (*(volatile uint32_t *) &(i)) static uint32_t icp_gcm_impl = IMPL_FASTEST; static uint32_t user_sel_impl = IMPL_FASTEST; -static inline int gcm_init_ctx_impl(boolean_t, gcm_ctx_t *, char *, size_t, - int (*)(const void *, const uint8_t *, uint8_t *), - void (*)(uint8_t *, uint8_t *), - void (*)(uint8_t *, uint8_t *)); - #ifdef CAN_USE_GCM_ASM /* Does the architecture we run on support the MOVBE instruction? */ boolean_t gcm_avx_can_use_movbe = B_FALSE; /* * Whether to use the optimized openssl gcm and ghash implementations. * Set to true if module parameter icp_gcm_impl == "avx". */ static boolean_t gcm_use_avx = B_FALSE; #define GCM_IMPL_USE_AVX (*(volatile boolean_t *)&gcm_use_avx) extern boolean_t ASMABI atomic_toggle_boolean_nv(volatile boolean_t *); static inline boolean_t gcm_avx_will_work(void); static inline void gcm_set_avx(boolean_t); static inline boolean_t gcm_toggle_avx(void); static inline size_t gcm_simd_get_htab_size(boolean_t); static int gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *, char *, size_t, crypto_data_t *, size_t); static int gcm_encrypt_final_avx(gcm_ctx_t *, crypto_data_t *, size_t); static int gcm_decrypt_final_avx(gcm_ctx_t *, crypto_data_t *, size_t); static int gcm_init_avx(gcm_ctx_t *, const uint8_t *, size_t, const uint8_t *, size_t, size_t); #endif /* ifdef CAN_USE_GCM_ASM */ /* * Encrypt multiple blocks of data in GCM mode. Decrypt for GCM mode * is done in another function. */ int gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length, crypto_data_t *out, size_t block_size, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)) { #ifdef CAN_USE_GCM_ASM if (ctx->gcm_use_avx == B_TRUE) return (gcm_mode_encrypt_contiguous_blocks_avx( ctx, data, length, out, block_size)); #endif const gcm_impl_ops_t *gops; size_t remainder = length; size_t need = 0; uint8_t *datap = (uint8_t *)data; uint8_t *blockp; uint8_t *lastp; void *iov_or_mp; offset_t offset; uint8_t *out_data_1; uint8_t *out_data_2; size_t out_data_1_len; uint64_t counter; uint64_t counter_mask = ntohll(0x00000000ffffffffULL); if (length + ctx->gcm_remainder_len < block_size) { /* accumulate bytes here and return */ memcpy((uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len, datap, length); ctx->gcm_remainder_len += length; if (ctx->gcm_copy_to == NULL) { ctx->gcm_copy_to = datap; } return (CRYPTO_SUCCESS); } crypto_init_ptrs(out, &iov_or_mp, &offset); gops = gcm_impl_get_ops(); do { /* Unprocessed data from last call. */ if (ctx->gcm_remainder_len > 0) { need = block_size - ctx->gcm_remainder_len; if (need > remainder) return (CRYPTO_DATA_LEN_RANGE); memcpy(&((uint8_t *)ctx->gcm_remainder) [ctx->gcm_remainder_len], datap, need); blockp = (uint8_t *)ctx->gcm_remainder; } else { blockp = datap; } /* * Increment counter. Counter bits are confined * to the bottom 32 bits of the counter block. 
*/ counter = ntohll(ctx->gcm_cb[1] & counter_mask); counter = htonll(counter + 1); counter &= counter_mask; ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter; encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, (uint8_t *)ctx->gcm_tmp); xor_block(blockp, (uint8_t *)ctx->gcm_tmp); lastp = (uint8_t *)ctx->gcm_tmp; ctx->gcm_processed_data_len += block_size; crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1, &out_data_1_len, &out_data_2, block_size); /* copy block to where it belongs */ if (out_data_1_len == block_size) { copy_block(lastp, out_data_1); } else { memcpy(out_data_1, lastp, out_data_1_len); if (out_data_2 != NULL) { memcpy(out_data_2, lastp + out_data_1_len, block_size - out_data_1_len); } } /* update offset */ out->cd_offset += block_size; /* add ciphertext to the hash */ GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash, gops); /* Update pointer to next block of data to be processed. */ if (ctx->gcm_remainder_len != 0) { datap += need; ctx->gcm_remainder_len = 0; } else { datap += block_size; } remainder = (size_t)&data[length] - (size_t)datap; /* Incomplete last block. */ if (remainder > 0 && remainder < block_size) { memcpy(ctx->gcm_remainder, datap, remainder); ctx->gcm_remainder_len = remainder; ctx->gcm_copy_to = datap; goto out; } ctx->gcm_copy_to = NULL; } while (remainder > 0); out: return (CRYPTO_SUCCESS); } int gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)) { (void) copy_block; #ifdef CAN_USE_GCM_ASM if (ctx->gcm_use_avx == B_TRUE) return (gcm_encrypt_final_avx(ctx, out, block_size)); #endif const gcm_impl_ops_t *gops; uint64_t counter_mask = ntohll(0x00000000ffffffffULL); uint8_t *ghash, *macp = NULL; int i, rv; if (out->cd_length < (ctx->gcm_remainder_len + ctx->gcm_tag_len)) { return (CRYPTO_DATA_LEN_RANGE); } gops = gcm_impl_get_ops(); ghash = (uint8_t *)ctx->gcm_ghash; if (ctx->gcm_remainder_len > 0) { uint64_t counter; uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp; /* * Here is where we deal with data that is not a * multiple of the block size. */ /* * Increment counter. */ counter = ntohll(ctx->gcm_cb[1] & counter_mask); counter = htonll(counter + 1); counter &= counter_mask; ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter; encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, (uint8_t *)ctx->gcm_tmp); macp = (uint8_t *)ctx->gcm_remainder; memset(macp + ctx->gcm_remainder_len, 0, block_size - ctx->gcm_remainder_len); /* XOR with counter block */ for (i = 0; i < ctx->gcm_remainder_len; i++) { macp[i] ^= tmpp[i]; } /* add ciphertext to the hash */ GHASH(ctx, macp, ghash, gops); ctx->gcm_processed_data_len += ctx->gcm_remainder_len; } ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len)); GHASH(ctx, ctx->gcm_len_a_len_c, ghash, gops); encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0, (uint8_t *)ctx->gcm_J0); xor_block((uint8_t *)ctx->gcm_J0, ghash); if (ctx->gcm_remainder_len > 0) { rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len); if (rv != CRYPTO_SUCCESS) return (rv); } out->cd_offset += ctx->gcm_remainder_len; ctx->gcm_remainder_len = 0; rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len); if (rv != CRYPTO_SUCCESS) return (rv); out->cd_offset += ctx->gcm_tag_len; return (CRYPTO_SUCCESS); } /* * This will only deal with decrypting the last block of the input that * might not be a multiple of block length. 
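The increment-the-low-32-bits sequence above recurs in the encrypt, final and decrypt paths alike. Isolated into one function (byte-order stubs here assume a little-endian host; in the kernel these are the sys/byteorder.h macros):

#include <stdint.h>

#define ntohll(x) __builtin_bswap64(x)	/* little-endian host assumed */
#define htonll(x) __builtin_bswap64(x)

/*
 * Bump the 32-bit counter in the second big-endian half of the GCM
 * counter block, as gcm_mode_encrypt_contiguous_blocks() does: the
 * nonce bits outside counter_mask are preserved, and the counter
 * wraps modulo 2^32 rather than carrying into them.
 */
static uint64_t
gcm_bump_counter(uint64_t cb1)
{
	const uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	uint64_t counter = ntohll(cb1 & counter_mask);

	counter = htonll(counter + 1);
	counter &= counter_mask;
	return ((cb1 & ~counter_mask) | counter);
}

int
main(void)
{
	/* A counter of 1 becomes 2; the nonce half is untouched. */
	return (gcm_bump_counter(htonll(1ULL)) == htonll(2ULL) ? 0 : 1);
}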
*/ static void gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)) { uint8_t *datap, *outp, *counterp; uint64_t counter; uint64_t counter_mask = ntohll(0x00000000ffffffffULL); int i; /* * Increment counter. * Counter bits are confined to the bottom 32 bits */ counter = ntohll(ctx->gcm_cb[1] & counter_mask); counter = htonll(counter + 1); counter &= counter_mask; ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter; datap = (uint8_t *)ctx->gcm_remainder; outp = &((ctx->gcm_pt_buf)[index]); counterp = (uint8_t *)ctx->gcm_tmp; /* authentication tag */ memset((uint8_t *)ctx->gcm_tmp, 0, block_size); memcpy((uint8_t *)ctx->gcm_tmp, datap, ctx->gcm_remainder_len); /* add ciphertext to the hash */ GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash, gcm_impl_get_ops()); /* decrypt remaining ciphertext */ encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp); /* XOR with counter block */ for (i = 0; i < ctx->gcm_remainder_len; i++) { outp[i] = datap[i] ^ counterp[i]; } } int gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length, crypto_data_t *out, size_t block_size, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)) { (void) out, (void) block_size, (void) encrypt_block, (void) copy_block, (void) xor_block; size_t new_len; uint8_t *new; /* * Copy contiguous ciphertext input blocks to plaintext buffer. * Ciphertext will be decrypted in the final. */ if (length > 0) { new_len = ctx->gcm_pt_buf_len + length; new = vmem_alloc(new_len, KM_SLEEP); if (new == NULL) { vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len); ctx->gcm_pt_buf = NULL; return (CRYPTO_HOST_MEMORY); } if (ctx->gcm_pt_buf != NULL) { memcpy(new, ctx->gcm_pt_buf, ctx->gcm_pt_buf_len); vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len); } else { ASSERT0(ctx->gcm_pt_buf_len); } ctx->gcm_pt_buf = new; ctx->gcm_pt_buf_len = new_len; memcpy(&ctx->gcm_pt_buf[ctx->gcm_processed_data_len], data, length); ctx->gcm_processed_data_len += length; } ctx->gcm_remainder_len = 0; return (CRYPTO_SUCCESS); } int gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)) { #ifdef CAN_USE_GCM_ASM if (ctx->gcm_use_avx == B_TRUE) return (gcm_decrypt_final_avx(ctx, out, block_size)); #endif const gcm_impl_ops_t *gops; size_t pt_len; size_t remainder; uint8_t *ghash; uint8_t *blockp; uint8_t *cbp; uint64_t counter; uint64_t counter_mask = ntohll(0x00000000ffffffffULL); int processed = 0, rv; ASSERT(ctx->gcm_processed_data_len == ctx->gcm_pt_buf_len); gops = gcm_impl_get_ops(); pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len; ghash = (uint8_t *)ctx->gcm_ghash; blockp = ctx->gcm_pt_buf; remainder = pt_len; while (remainder > 0) { /* Incomplete last block */ if (remainder < block_size) { memcpy(ctx->gcm_remainder, blockp, remainder); ctx->gcm_remainder_len = remainder; /* * not expecting anymore ciphertext, just * compute plaintext for the remaining input */ gcm_decrypt_incomplete_block(ctx, block_size, processed, encrypt_block, xor_block); ctx->gcm_remainder_len = 0; goto out; } /* add ciphertext to the hash */ GHASH(ctx, blockp, ghash, gops); /* * Increment counter. 
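Worth spelling out why gcm_mode_decrypt_contiguous_blocks() above only buffers: the authentication tag occupies the final gcm_tag_len bytes of the ciphertext stream, so where the data ends is unknowable until the final call, and both decryption and tag verification are deferred until then. There is no realloc available here, hence the allocate-copy-free growth; a user-space sketch with malloc standing in for vmem_alloc:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Grow-by-copy append, the gcm_pt_buf pattern. (The ICP version also
 * frees the old buffer on allocation failure before erroring out.) */
static int
grow_append(uint8_t **buf, size_t *len, const uint8_t *data, size_t n)
{
	size_t new_len = *len + n;
	uint8_t *p = malloc(new_len);

	if (p == NULL)
		return (-1);
	if (*buf != NULL) {
		memcpy(p, *buf, *len);
		free(*buf);
	}
	memcpy(p + *len, data, n);
	*buf = p;
	*len = new_len;
	return (0);
}

int
main(void)
{
	uint8_t *buf = NULL;
	size_t len = 0;

	(void) grow_append(&buf, &len, (const uint8_t *)"abc", 3);
	(void) grow_append(&buf, &len, (const uint8_t *)"def", 3);
	free(buf);
	return (0);
}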
* Counter bits are confined to the bottom 32 bits */ counter = ntohll(ctx->gcm_cb[1] & counter_mask); counter = htonll(counter + 1); counter &= counter_mask; ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter; cbp = (uint8_t *)ctx->gcm_tmp; encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp); /* XOR with ciphertext */ xor_block(cbp, blockp); processed += block_size; blockp += block_size; remainder -= block_size; } out: ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len)); GHASH(ctx, ctx->gcm_len_a_len_c, ghash, gops); encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0, (uint8_t *)ctx->gcm_J0); xor_block((uint8_t *)ctx->gcm_J0, ghash); /* compare the input authentication tag with what we calculated */ if (memcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) { /* They don't match */ return (CRYPTO_INVALID_MAC); } else { rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len); if (rv != CRYPTO_SUCCESS) return (rv); out->cd_offset += pt_len; } return (CRYPTO_SUCCESS); } static int gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param) { size_t tag_len; /* * Check the length of the authentication tag (in bits). */ tag_len = gcm_param->ulTagBits; switch (tag_len) { case 32: case 64: case 96: case 104: case 112: case 120: case 128: break; default: return (CRYPTO_MECHANISM_PARAM_INVALID); } if (gcm_param->ulIvLen == 0) return (CRYPTO_MECHANISM_PARAM_INVALID); return (CRYPTO_SUCCESS); } static void gcm_format_initial_blocks(const uint8_t *iv, ulong_t iv_len, gcm_ctx_t *ctx, size_t block_size, void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)) { const gcm_impl_ops_t *gops; uint8_t *cb; ulong_t remainder = iv_len; ulong_t processed = 0; uint8_t *datap, *ghash; uint64_t len_a_len_c[2]; gops = gcm_impl_get_ops(); ghash = (uint8_t *)ctx->gcm_ghash; cb = (uint8_t *)ctx->gcm_cb; if (iv_len == 12) { memcpy(cb, iv, 12); cb[12] = 0; cb[13] = 0; cb[14] = 0; cb[15] = 1; /* J0 will be used again in the final */ copy_block(cb, (uint8_t *)ctx->gcm_J0); } else { /* GHASH the IV */ do { if (remainder < block_size) { memset(cb, 0, block_size); memcpy(cb, &(iv[processed]), remainder); datap = (uint8_t *)cb; remainder = 0; } else { datap = (uint8_t *)(&(iv[processed])); processed += block_size; remainder -= block_size; } GHASH(ctx, datap, ghash, gops); } while (remainder > 0); len_a_len_c[0] = 0; len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(iv_len)); GHASH(ctx, len_a_len_c, ctx->gcm_J0, gops); /* J0 will be used again in the final */ copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb); } } static int gcm_init(gcm_ctx_t *ctx, const uint8_t *iv, size_t iv_len, const uint8_t *auth_data, size_t auth_data_len, size_t block_size, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)) { const gcm_impl_ops_t *gops; uint8_t *ghash, *datap, *authp; size_t remainder, processed; /* encrypt zero block to get subkey H */ memset(ctx->gcm_H, 0, sizeof (ctx->gcm_H)); encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H, (uint8_t *)ctx->gcm_H); gcm_format_initial_blocks(iv, iv_len, ctx, block_size, copy_block, xor_block); gops = gcm_impl_get_ops(); authp = (uint8_t *)ctx->gcm_tmp; ghash = (uint8_t *)ctx->gcm_ghash; memset(authp, 0, block_size); memset(ghash, 0, block_size); processed = 0; remainder = auth_data_len; do { if (remainder < block_size) { /* * There's not a block full of data, pad rest of * buffer with zero */ if (auth_data != NULL) { memset(authp, 0, block_size); 
memcpy(authp, &(auth_data[processed]), remainder); } else { ASSERT0(remainder); } datap = (uint8_t *)authp; remainder = 0; } else { datap = (uint8_t *)(&(auth_data[processed])); processed += block_size; remainder -= block_size; } /* add auth data to the hash */ GHASH(ctx, datap, ghash, gops); } while (remainder > 0); return (CRYPTO_SUCCESS); } -/* - * The following function is called at encrypt or decrypt init time - * for AES GCM mode. - */ -int -gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size, - int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), - void (*copy_block)(uint8_t *, uint8_t *), - void (*xor_block)(uint8_t *, uint8_t *)) -{ - return (gcm_init_ctx_impl(B_FALSE, gcm_ctx, param, block_size, - encrypt_block, copy_block, xor_block)); -} - -/* - * The following function is called at encrypt or decrypt init time - * for AES GMAC mode. - */ -int -gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size, - int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), - void (*copy_block)(uint8_t *, uint8_t *), - void (*xor_block)(uint8_t *, uint8_t *)) -{ - return (gcm_init_ctx_impl(B_TRUE, gcm_ctx, param, block_size, - encrypt_block, copy_block, xor_block)); -} - /* * Init the GCM context struct. Handle the cycle and avx implementations here. - * Initialization of a GMAC context differs slightly from a GCM context. */ -static inline int -gcm_init_ctx_impl(boolean_t gmac_mode, gcm_ctx_t *gcm_ctx, char *param, +int +gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)) { CK_AES_GCM_PARAMS *gcm_param; int rv = CRYPTO_SUCCESS; size_t tag_len, iv_len; if (param != NULL) { gcm_param = (CK_AES_GCM_PARAMS *)(void *)param; - if (gmac_mode == B_FALSE) { - /* GCM mode. */ - if ((rv = gcm_validate_args(gcm_param)) != 0) { - return (rv); - } - gcm_ctx->gcm_flags |= GCM_MODE; - - size_t tbits = gcm_param->ulTagBits; - tag_len = CRYPTO_BITS2BYTES(tbits); - iv_len = gcm_param->ulIvLen; - } else { - /* GMAC mode. */ - gcm_ctx->gcm_flags |= GMAC_MODE; - tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS); - iv_len = AES_GMAC_IV_LEN; + /* GCM mode. */ + if ((rv = gcm_validate_args(gcm_param)) != 0) { + return (rv); } + gcm_ctx->gcm_flags |= GCM_MODE; + + size_t tbits = gcm_param->ulTagBits; + tag_len = CRYPTO_BITS2BYTES(tbits); + iv_len = gcm_param->ulIvLen; + gcm_ctx->gcm_tag_len = tag_len; gcm_ctx->gcm_processed_data_len = 0; /* these values are in bits */ gcm_ctx->gcm_len_a_len_c[0] = htonll(CRYPTO_BYTES2BITS(gcm_param->ulAADLen)); } else { return (CRYPTO_MECHANISM_PARAM_INVALID); } const uint8_t *iv = (const uint8_t *)gcm_param->pIv; const uint8_t *aad = (const uint8_t *)gcm_param->pAAD; size_t aad_len = gcm_param->ulAADLen; #ifdef CAN_USE_GCM_ASM boolean_t needs_bswap = ((aes_key_t *)gcm_ctx->gcm_keysched)->ops->needs_byteswap; if (GCM_IMPL_READ(icp_gcm_impl) != IMPL_CYCLE) { gcm_ctx->gcm_use_avx = GCM_IMPL_USE_AVX; } else { /* * Handle the "cycle" implementation by creating avx and * non-avx contexts alternately. */ gcm_ctx->gcm_use_avx = gcm_toggle_avx(); /* The avx impl. doesn't handle byte swapped key schedules. */ if (gcm_ctx->gcm_use_avx == B_TRUE && needs_bswap == B_TRUE) { gcm_ctx->gcm_use_avx = B_FALSE; } /* * If this is a GCM context, use the MOVBE and the BSWAP - * variants alternately. GMAC contexts code paths do not - * use the MOVBE instruction. + * variants alternately. 
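gcm_format_initial_blocks() above implements the two IV cases of the GCM spec (NIST SP 800-38D): a 96-bit IV is used directly, J0 = IV || 0^31 || 1, while any other length is padded and run through GHASH together with its bit length. The common fast path as a stand-alone sketch:

#include <stdint.h>
#include <string.h>

/* Pre-counter block J0 for the usual 12-byte IV, mirroring the
 * iv_len == 12 branch of gcm_format_initial_blocks(). */
static void
gcm_j0_96bit(const uint8_t iv[12], uint8_t j0[16])
{
	memcpy(j0, iv, 12);
	j0[12] = 0;
	j0[13] = 0;
	j0[14] = 0;
	j0[15] = 1;	/* the 32-bit block counter starts at 1 */
}

int
main(void)
{
	uint8_t iv[12] = { 0 }, j0[16];

	gcm_j0_96bit(iv, j0);
	return (j0[15] == 1 ? 0 : 1);
}

E(K, J0) is reserved for masking the tag in the final, which is why the first data block is encrypted under counter value 2.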
*/ - if (gcm_ctx->gcm_use_avx == B_TRUE && gmac_mode == B_FALSE && + if (gcm_ctx->gcm_use_avx == B_TRUE && zfs_movbe_available() == B_TRUE) { (void) atomic_toggle_boolean_nv( (volatile boolean_t *)&gcm_avx_can_use_movbe); } } /* * We don't handle byte-swapped key schedules in the avx code path; * still, they could be created by the aes generic implementation. * Make sure not to use them since we'll corrupt data if we do. */ if (gcm_ctx->gcm_use_avx == B_TRUE && needs_bswap == B_TRUE) { gcm_ctx->gcm_use_avx = B_FALSE; cmn_err_once(CE_WARN, "ICP: Can't use the aes generic or cycle implementations " "in combination with the gcm avx implementation!"); cmn_err_once(CE_WARN, "ICP: Falling back to a compatible implementation, " "aes-gcm performance will likely be degraded."); cmn_err_once(CE_WARN, "ICP: Choose at least the x86_64 aes implementation to " "restore performance."); } /* Allocate Htab memory as needed. */ if (gcm_ctx->gcm_use_avx == B_TRUE) { size_t htab_len = gcm_simd_get_htab_size(gcm_ctx->gcm_use_avx); if (htab_len == 0) { return (CRYPTO_MECHANISM_PARAM_INVALID); } gcm_ctx->gcm_htab_len = htab_len; gcm_ctx->gcm_Htable = kmem_alloc(htab_len, KM_SLEEP); if (gcm_ctx->gcm_Htable == NULL) { return (CRYPTO_HOST_MEMORY); } } /* Avx and non-avx context initialization differs from here on. */ if (gcm_ctx->gcm_use_avx == B_FALSE) { #endif /* ifdef CAN_USE_GCM_ASM */ if (gcm_init(gcm_ctx, iv, iv_len, aad, aad_len, block_size, encrypt_block, copy_block, xor_block) != CRYPTO_SUCCESS) { rv = CRYPTO_MECHANISM_PARAM_INVALID; } #ifdef CAN_USE_GCM_ASM } else { if (gcm_init_avx(gcm_ctx, iv, iv_len, aad, aad_len, block_size) != CRYPTO_SUCCESS) { rv = CRYPTO_MECHANISM_PARAM_INVALID; } } #endif /* ifdef CAN_USE_GCM_ASM */ return (rv); } void * gcm_alloc_ctx(int kmflag) { gcm_ctx_t *gcm_ctx; if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL) return (NULL); gcm_ctx->gcm_flags = GCM_MODE; return (gcm_ctx); } -void * -gmac_alloc_ctx(int kmflag) -{ - gcm_ctx_t *gcm_ctx; - - if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL) - return (NULL); - - gcm_ctx->gcm_flags = GMAC_MODE; - return (gcm_ctx); -} - /* GCM implementation that contains the fastest methods */ static gcm_impl_ops_t gcm_fastest_impl = { .name = "fastest" }; /* All compiled-in implementations */ static const gcm_impl_ops_t *gcm_all_impl[] = { &gcm_generic_impl, #if defined(__x86_64) && defined(HAVE_PCLMULQDQ) &gcm_pclmulqdq_impl, #endif }; /* Indicates that the benchmark has been completed */ static boolean_t gcm_impl_initialized = B_FALSE; /* Hold all supported implementations */ static size_t gcm_supp_impl_cnt = 0; static gcm_impl_ops_t *gcm_supp_impl[ARRAY_SIZE(gcm_all_impl)]; /* * Returns the GCM operations for encrypt/decrypt/key setup. When a * SIMD implementation is not allowed in the current context, * fall back to the fastest generic implementation.
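* * A minimal usage sketch, mirroring this file's own call sites: * * const gcm_impl_ops_t *gops = gcm_impl_get_ops(); * GHASH(ctx, datap, ghash, gops);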
*/ const gcm_impl_ops_t * gcm_impl_get_ops(void) { if (!kfpu_allowed()) return (&gcm_generic_impl); const gcm_impl_ops_t *ops = NULL; const uint32_t impl = GCM_IMPL_READ(icp_gcm_impl); switch (impl) { case IMPL_FASTEST: ASSERT(gcm_impl_initialized); ops = &gcm_fastest_impl; break; case IMPL_CYCLE: /* Cycle through supported implementations */ ASSERT(gcm_impl_initialized); ASSERT3U(gcm_supp_impl_cnt, >, 0); static size_t cycle_impl_idx = 0; size_t idx = (++cycle_impl_idx) % gcm_supp_impl_cnt; ops = gcm_supp_impl[idx]; break; #ifdef CAN_USE_GCM_ASM case IMPL_AVX: /* * Make sure that we return a valid implementation while * switching to the avx implementation since there still * may be unfinished non-avx contexts around. */ ops = &gcm_generic_impl; break; #endif default: ASSERT3U(impl, <, gcm_supp_impl_cnt); ASSERT3U(gcm_supp_impl_cnt, >, 0); if (impl < ARRAY_SIZE(gcm_all_impl)) ops = gcm_supp_impl[impl]; break; } ASSERT3P(ops, !=, NULL); return (ops); } /* * Initialize all supported implementations. */ void gcm_impl_init(void) { gcm_impl_ops_t *curr_impl; int i, c; /* Move supported implementations into gcm_supp_impl */ for (i = 0, c = 0; i < ARRAY_SIZE(gcm_all_impl); i++) { curr_impl = (gcm_impl_ops_t *)gcm_all_impl[i]; if (curr_impl->is_supported()) gcm_supp_impl[c++] = (gcm_impl_ops_t *)curr_impl; } gcm_supp_impl_cnt = c; /* * Set the fastest implementation given the assumption that the * hardware accelerated version is the fastest. */ #if defined(__x86_64) && defined(HAVE_PCLMULQDQ) if (gcm_pclmulqdq_impl.is_supported()) { memcpy(&gcm_fastest_impl, &gcm_pclmulqdq_impl, sizeof (gcm_fastest_impl)); } else #endif { memcpy(&gcm_fastest_impl, &gcm_generic_impl, sizeof (gcm_fastest_impl)); } strlcpy(gcm_fastest_impl.name, "fastest", GCM_IMPL_NAME_MAX); #ifdef CAN_USE_GCM_ASM /* * Use the avx implementation if it's available and the implementation * hasn't changed from its default value of fastest on module load. */ if (gcm_avx_will_work()) { #ifdef HAVE_MOVBE if (zfs_movbe_available() == B_TRUE) { atomic_swap_32(&gcm_avx_can_use_movbe, B_TRUE); } #endif if (GCM_IMPL_READ(user_sel_impl) == IMPL_FASTEST) { gcm_set_avx(B_TRUE); } } #endif /* Finish initialization */ atomic_swap_32(&icp_gcm_impl, user_sel_impl); gcm_impl_initialized = B_TRUE; } static const struct { const char *name; uint32_t sel; } gcm_impl_opts[] = { { "cycle", IMPL_CYCLE }, { "fastest", IMPL_FASTEST }, #ifdef CAN_USE_GCM_ASM { "avx", IMPL_AVX }, #endif }; /* * Set the desired gcm implementation. * * If we are called before init(), the user preference is saved in * user_sel_impl and applied in a later init() call. This occurs when the * module parameter is specified on module load. Otherwise, icp_gcm_impl * is updated directly. * * @val Name of the gcm implementation to use */ int gcm_impl_set(const char *val) { int err = -EINVAL; char req_name[GCM_IMPL_NAME_MAX]; uint32_t impl = GCM_IMPL_READ(user_sel_impl); size_t i; /* sanitize input */ i = strnlen(val, GCM_IMPL_NAME_MAX); if (i == 0 || i >= GCM_IMPL_NAME_MAX) return (err); strlcpy(req_name, val, GCM_IMPL_NAME_MAX); while (i > 0 && isspace(req_name[i-1])) i--; req_name[i] = '\0'; /* Check mandatory options */ for (i = 0; i < ARRAY_SIZE(gcm_impl_opts); i++) { #ifdef CAN_USE_GCM_ASM /* Ignore avx implementation if it won't work.
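* (A request for "avx" on hardware where gcm_avx_will_work() fails thus matches nothing here or in the loop below, so -EINVAL is returned. On Linux builds the parameter is typically exercised via /sys/module/zfs/parameters/icp_gcm_impl -- a path assumed here, depending on how the ICP is linked into the zfs module.)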
*/ if (gcm_impl_opts[i].sel == IMPL_AVX && !gcm_avx_will_work()) { continue; } #endif if (strcmp(req_name, gcm_impl_opts[i].name) == 0) { impl = gcm_impl_opts[i].sel; err = 0; break; } } /* check all supported impl if init() was already called */ if (err != 0 && gcm_impl_initialized) { /* check all supported implementations */ for (i = 0; i < gcm_supp_impl_cnt; i++) { if (strcmp(req_name, gcm_supp_impl[i]->name) == 0) { impl = i; err = 0; break; } } } #ifdef CAN_USE_GCM_ASM /* * Use the avx implementation if available and the requested one is * avx or fastest. */ if (gcm_avx_will_work() == B_TRUE && (impl == IMPL_AVX || impl == IMPL_FASTEST)) { gcm_set_avx(B_TRUE); } else { gcm_set_avx(B_FALSE); } #endif if (err == 0) { if (gcm_impl_initialized) atomic_swap_32(&icp_gcm_impl, impl); else atomic_swap_32(&user_sel_impl, impl); } return (err); } #if defined(_KERNEL) && defined(__linux__) static int icp_gcm_impl_set(const char *val, zfs_kernel_param_t *kp) { return (gcm_impl_set(val)); } static int icp_gcm_impl_get(char *buffer, zfs_kernel_param_t *kp) { int i, cnt = 0; char *fmt; const uint32_t impl = GCM_IMPL_READ(icp_gcm_impl); ASSERT(gcm_impl_initialized); /* list mandatory options */ for (i = 0; i < ARRAY_SIZE(gcm_impl_opts); i++) { #ifdef CAN_USE_GCM_ASM /* Ignore avx implementation if it won't work. */ if (gcm_impl_opts[i].sel == IMPL_AVX && !gcm_avx_will_work()) { continue; } #endif fmt = (impl == gcm_impl_opts[i].sel) ? "[%s] " : "%s "; cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt, gcm_impl_opts[i].name); } /* list all supported implementations */ for (i = 0; i < gcm_supp_impl_cnt; i++) { fmt = (i == impl) ? "[%s] " : "%s "; cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt, gcm_supp_impl[i]->name); } return (cnt); } module_param_call(icp_gcm_impl, icp_gcm_impl_set, icp_gcm_impl_get, NULL, 0644); MODULE_PARM_DESC(icp_gcm_impl, "Select gcm implementation."); #endif /* defined(_KERNEL) && defined(__linux__) */ #ifdef CAN_USE_GCM_ASM #define GCM_BLOCK_LEN 16 /* * The OpenSSL asm routines are 6x aggregated and need that many bytes * at minimum. */ #define GCM_AVX_MIN_DECRYPT_BYTES (GCM_BLOCK_LEN * 6) #define GCM_AVX_MIN_ENCRYPT_BYTES (GCM_BLOCK_LEN * 6 * 3) /* * Ensure the chunk size is reasonable since we are allocating a buffer of * GCM_AVX_MAX_CHUNK_SIZE bytes and disabling preemption and interrupts. */ #define GCM_AVX_MAX_CHUNK_SIZE \ (((128*1024)/GCM_AVX_MIN_DECRYPT_BYTES) * GCM_AVX_MIN_DECRYPT_BYTES) /* Clear the FPU registers since they hold sensitive internal state. */ #define clear_fpu_regs() clear_fpu_regs_avx() #define GHASH_AVX(ctx, in, len) \ gcm_ghash_avx((ctx)->gcm_ghash, (const uint64_t *)(ctx)->gcm_Htable, \ in, len) #define gcm_incr_counter_block(ctx) gcm_incr_counter_block_by(ctx, 1) /* Get the chunk size module parameter. */ #define GCM_CHUNK_SIZE_READ *(volatile uint32_t *) &gcm_avx_chunk_size /* * Module parameter: number of bytes to process at once while owning the FPU. * Rounded down to the next GCM_AVX_MIN_DECRYPT_BYTES byte boundary and * ensured to be greater than or equal to GCM_AVX_MIN_DECRYPT_BYTES.
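* * Worked example: GCM_AVX_MIN_DECRYPT_BYTES is 16 * 6 = 96, so the default * below is (32 * 1024 / 96) * 96 = 32736 bytes, and GCM_AVX_MAX_CHUNK_SIZE * is (128 * 1024 / 96) * 96 = 131040 bytes.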
*/ static uint32_t gcm_avx_chunk_size = ((32 * 1024) / GCM_AVX_MIN_DECRYPT_BYTES) * GCM_AVX_MIN_DECRYPT_BYTES; extern void ASMABI clear_fpu_regs_avx(void); extern void ASMABI gcm_xor_avx(const uint8_t *src, uint8_t *dst); extern void ASMABI aes_encrypt_intel(const uint32_t rk[], int nr, const uint32_t pt[4], uint32_t ct[4]); extern void ASMABI gcm_init_htab_avx(uint64_t *Htable, const uint64_t H[2]); extern void ASMABI gcm_ghash_avx(uint64_t ghash[2], const uint64_t *Htable, const uint8_t *in, size_t len); extern size_t ASMABI aesni_gcm_encrypt(const uint8_t *, uint8_t *, size_t, const void *, uint64_t *, uint64_t *); extern size_t ASMABI aesni_gcm_decrypt(const uint8_t *, uint8_t *, size_t, const void *, uint64_t *, uint64_t *); static inline boolean_t gcm_avx_will_work(void) { /* Avx should imply aes-ni and pclmulqdq, but make sure anyhow. */ return (kfpu_allowed() && zfs_avx_available() && zfs_aes_available() && zfs_pclmulqdq_available()); } static inline void gcm_set_avx(boolean_t val) { if (gcm_avx_will_work() == B_TRUE) { atomic_swap_32(&gcm_use_avx, val); } } static inline boolean_t gcm_toggle_avx(void) { if (gcm_avx_will_work() == B_TRUE) { return (atomic_toggle_boolean_nv(&GCM_IMPL_USE_AVX)); } else { return (B_FALSE); } } static inline size_t gcm_simd_get_htab_size(boolean_t simd_mode) { switch (simd_mode) { case B_TRUE: return (2 * 6 * 2 * sizeof (uint64_t)); default: return (0); } } /* Increment the GCM counter block by n. */ static inline void gcm_incr_counter_block_by(gcm_ctx_t *ctx, int n) { uint64_t counter_mask = ntohll(0x00000000ffffffffULL); uint64_t counter = ntohll(ctx->gcm_cb[1] & counter_mask); counter = htonll(counter + n); counter &= counter_mask; ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter; } /* * Encrypt multiple blocks of data in GCM mode. * This is done in gcm_avx_chunk_size chunks, utilizing AVX assembler routines * if possible. While processing a chunk the FPU is "locked". */ static int gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data, size_t length, crypto_data_t *out, size_t block_size) { size_t bleft = length; size_t need = 0; size_t done = 0; uint8_t *datap = (uint8_t *)data; size_t chunk_size = (size_t)GCM_CHUNK_SIZE_READ; const aes_key_t *key = ((aes_key_t *)ctx->gcm_keysched); uint64_t *ghash = ctx->gcm_ghash; uint64_t *cb = ctx->gcm_cb; uint8_t *ct_buf = NULL; uint8_t *tmp = (uint8_t *)ctx->gcm_tmp; int rv = CRYPTO_SUCCESS; ASSERT(block_size == GCM_BLOCK_LEN); ASSERT3S(((aes_key_t *)ctx->gcm_keysched)->ops->needs_byteswap, ==, B_FALSE); /* * If the last call left an incomplete block, try to fill * it first. */ if (ctx->gcm_remainder_len > 0) { need = block_size - ctx->gcm_remainder_len; if (length < need) { /* Accumulate bytes here and return. */ memcpy((uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len, datap, length); ctx->gcm_remainder_len += length; if (ctx->gcm_copy_to == NULL) { ctx->gcm_copy_to = datap; } return (CRYPTO_SUCCESS); } else { /* Complete incomplete block. */ memcpy((uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len, datap, need); ctx->gcm_copy_to = NULL; } } /* Allocate a buffer to encrypt to if there is enough input. */ if (bleft >= GCM_AVX_MIN_ENCRYPT_BYTES) { ct_buf = vmem_alloc(chunk_size, KM_SLEEP); if (ct_buf == NULL) { return (CRYPTO_HOST_MEMORY); } } /* If we completed an incomplete block, encrypt and write it out. 
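* The counter block in gcm_cb is encrypted into gcm_tmp, XORed with the * buffered remainder bytes to form one ciphertext block, and GHASHed * before being written out.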
*/ if (ctx->gcm_remainder_len > 0) { kfpu_begin(); aes_encrypt_intel(key->encr_ks.ks32, key->nr, (const uint32_t *)cb, (uint32_t *)tmp); gcm_xor_avx((const uint8_t *) ctx->gcm_remainder, tmp); GHASH_AVX(ctx, tmp, block_size); clear_fpu_regs(); kfpu_end(); rv = crypto_put_output_data(tmp, out, block_size); out->cd_offset += block_size; gcm_incr_counter_block(ctx); ctx->gcm_processed_data_len += block_size; bleft -= need; datap += need; ctx->gcm_remainder_len = 0; } /* Do the bulk encryption in chunk_size blocks. */ for (; bleft >= chunk_size; bleft -= chunk_size) { kfpu_begin(); done = aesni_gcm_encrypt( datap, ct_buf, chunk_size, key, cb, ghash); clear_fpu_regs(); kfpu_end(); if (done != chunk_size) { rv = CRYPTO_FAILED; goto out_nofpu; } rv = crypto_put_output_data(ct_buf, out, chunk_size); if (rv != CRYPTO_SUCCESS) { goto out_nofpu; } out->cd_offset += chunk_size; datap += chunk_size; ctx->gcm_processed_data_len += chunk_size; } /* Check if we are already done. */ if (bleft == 0) { goto out_nofpu; } /* Bulk encrypt the remaining data. */ kfpu_begin(); if (bleft >= GCM_AVX_MIN_ENCRYPT_BYTES) { done = aesni_gcm_encrypt(datap, ct_buf, bleft, key, cb, ghash); if (done == 0) { rv = CRYPTO_FAILED; goto out; } rv = crypto_put_output_data(ct_buf, out, done); if (rv != CRYPTO_SUCCESS) { goto out; } out->cd_offset += done; ctx->gcm_processed_data_len += done; datap += done; bleft -= done; } /* Less than GCM_AVX_MIN_ENCRYPT_BYTES bytes remain, operate on blocks. */ while (bleft > 0) { if (bleft < block_size) { memcpy(ctx->gcm_remainder, datap, bleft); ctx->gcm_remainder_len = bleft; ctx->gcm_copy_to = datap; goto out; } /* Encrypt, hash and write out. */ aes_encrypt_intel(key->encr_ks.ks32, key->nr, (const uint32_t *)cb, (uint32_t *)tmp); gcm_xor_avx(datap, tmp); GHASH_AVX(ctx, tmp, block_size); rv = crypto_put_output_data(tmp, out, block_size); if (rv != CRYPTO_SUCCESS) { goto out; } out->cd_offset += block_size; gcm_incr_counter_block(ctx); ctx->gcm_processed_data_len += block_size; datap += block_size; bleft -= block_size; } out: clear_fpu_regs(); kfpu_end(); out_nofpu: if (ct_buf != NULL) { vmem_free(ct_buf, chunk_size); } return (rv); } /* * Finalize the encryption: zero-fill, encrypt, hash and write out any * incomplete last block. Encrypt the ICB. Calculate the tag and write it out. */ static int gcm_encrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size) { uint8_t *ghash = (uint8_t *)ctx->gcm_ghash; uint32_t *J0 = (uint32_t *)ctx->gcm_J0; uint8_t *remainder = (uint8_t *)ctx->gcm_remainder; size_t rem_len = ctx->gcm_remainder_len; const void *keysched = ((aes_key_t *)ctx->gcm_keysched)->encr_ks.ks32; int aes_rounds = ((aes_key_t *)keysched)->nr; int rv; ASSERT(block_size == GCM_BLOCK_LEN); ASSERT3S(((aes_key_t *)ctx->gcm_keysched)->ops->needs_byteswap, ==, B_FALSE); if (out->cd_length < (rem_len + ctx->gcm_tag_len)) { return (CRYPTO_DATA_LEN_RANGE); } kfpu_begin(); /* Pad last incomplete block with zeros, encrypt and hash. */ if (rem_len > 0) { uint8_t *tmp = (uint8_t *)ctx->gcm_tmp; const uint32_t *cb = (uint32_t *)ctx->gcm_cb; aes_encrypt_intel(keysched, aes_rounds, cb, (uint32_t *)tmp); memset(remainder + rem_len, 0, block_size - rem_len); for (int i = 0; i < rem_len; i++) { remainder[i] ^= tmp[i]; } GHASH_AVX(ctx, remainder, block_size); ctx->gcm_processed_data_len += rem_len; /* No need to increment counter_block, it's the last block. */ } /* Finish the tag.
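* The final GHASH input is the block len(A) || len(C) (both in bits); * the result is then XORed with E(K, J0), and the first gcm_tag_len * bytes of gcm_ghash are written out as the tag.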
*/ ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len)); GHASH_AVX(ctx, (const uint8_t *)ctx->gcm_len_a_len_c, block_size); aes_encrypt_intel(keysched, aes_rounds, J0, J0); gcm_xor_avx((uint8_t *)J0, ghash); clear_fpu_regs(); kfpu_end(); /* Output remainder. */ if (rem_len > 0) { rv = crypto_put_output_data(remainder, out, rem_len); if (rv != CRYPTO_SUCCESS) return (rv); } out->cd_offset += rem_len; ctx->gcm_remainder_len = 0; rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len); if (rv != CRYPTO_SUCCESS) return (rv); out->cd_offset += ctx->gcm_tag_len; return (CRYPTO_SUCCESS); } /* * Finalize decryption: so far we have only accumulated ciphertext, so now * we decrypt it in place. */ static int gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size) { ASSERT3U(ctx->gcm_processed_data_len, ==, ctx->gcm_pt_buf_len); ASSERT3U(block_size, ==, 16); ASSERT3S(((aes_key_t *)ctx->gcm_keysched)->ops->needs_byteswap, ==, B_FALSE); size_t chunk_size = (size_t)GCM_CHUNK_SIZE_READ; size_t pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len; uint8_t *datap = ctx->gcm_pt_buf; const aes_key_t *key = ((aes_key_t *)ctx->gcm_keysched); uint32_t *cb = (uint32_t *)ctx->gcm_cb; uint64_t *ghash = ctx->gcm_ghash; uint32_t *tmp = (uint32_t *)ctx->gcm_tmp; int rv = CRYPTO_SUCCESS; size_t bleft, done; /* * Decrypt in chunks of gcm_avx_chunk_size, which is asserted to be * greater than or equal to GCM_AVX_MIN_ENCRYPT_BYTES, and a multiple of * GCM_AVX_MIN_DECRYPT_BYTES. */ for (bleft = pt_len; bleft >= chunk_size; bleft -= chunk_size) { kfpu_begin(); done = aesni_gcm_decrypt(datap, datap, chunk_size, (const void *)key, ctx->gcm_cb, ghash); clear_fpu_regs(); kfpu_end(); if (done != chunk_size) { return (CRYPTO_FAILED); } datap += done; } /* Decrypt the remainder, which is less than chunk_size, in one go. */ kfpu_begin(); if (bleft >= GCM_AVX_MIN_DECRYPT_BYTES) { done = aesni_gcm_decrypt(datap, datap, bleft, (const void *)key, ctx->gcm_cb, ghash); if (done == 0) { clear_fpu_regs(); kfpu_end(); return (CRYPTO_FAILED); } datap += done; bleft -= done; } ASSERT(bleft < GCM_AVX_MIN_DECRYPT_BYTES); /* * Now less than GCM_AVX_MIN_DECRYPT_BYTES bytes remain, * decrypt them block by block. */ while (bleft > 0) { /* Incomplete last block. */ if (bleft < block_size) { uint8_t *lastb = (uint8_t *)ctx->gcm_remainder; memset(lastb, 0, block_size); memcpy(lastb, datap, bleft); /* The GCM processing. */ GHASH_AVX(ctx, lastb, block_size); aes_encrypt_intel(key->encr_ks.ks32, key->nr, cb, tmp); for (size_t i = 0; i < bleft; i++) { datap[i] = lastb[i] ^ ((uint8_t *)tmp)[i]; } break; } /* The GCM processing. */ GHASH_AVX(ctx, datap, block_size); aes_encrypt_intel(key->encr_ks.ks32, key->nr, cb, tmp); gcm_xor_avx((uint8_t *)tmp, datap); gcm_incr_counter_block(ctx); datap += block_size; bleft -= block_size; } if (rv != CRYPTO_SUCCESS) { clear_fpu_regs(); kfpu_end(); return (rv); } /* Decryption done, finish the tag. */ ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len)); GHASH_AVX(ctx, (uint8_t *)ctx->gcm_len_a_len_c, block_size); aes_encrypt_intel(key->encr_ks.ks32, key->nr, (uint32_t *)ctx->gcm_J0, (uint32_t *)ctx->gcm_J0); gcm_xor_avx((uint8_t *)ctx->gcm_J0, (uint8_t *)ghash); /* We are done with the FPU, restore its state. */ clear_fpu_regs(); kfpu_end(); /* Compare the input authentication tag with what we calculated. */ if (memcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) { /* They don't match.
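* Note that the plaintext accumulated in gcm_pt_buf is deliberately * withheld from the caller when the tag check fails.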
*/ return (CRYPTO_INVALID_MAC); } rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len); if (rv != CRYPTO_SUCCESS) { return (rv); } out->cd_offset += pt_len; return (CRYPTO_SUCCESS); } /* * Initialize the GCM params H, Htable and the counter block. Save the * initial counter block. */ static int gcm_init_avx(gcm_ctx_t *ctx, const uint8_t *iv, size_t iv_len, const uint8_t *auth_data, size_t auth_data_len, size_t block_size) { uint8_t *cb = (uint8_t *)ctx->gcm_cb; uint64_t *H = ctx->gcm_H; const void *keysched = ((aes_key_t *)ctx->gcm_keysched)->encr_ks.ks32; int aes_rounds = ((aes_key_t *)ctx->gcm_keysched)->nr; const uint8_t *datap = auth_data; size_t chunk_size = (size_t)GCM_CHUNK_SIZE_READ; size_t bleft; ASSERT(block_size == GCM_BLOCK_LEN); ASSERT3S(((aes_key_t *)ctx->gcm_keysched)->ops->needs_byteswap, ==, B_FALSE); /* Init H (encrypt zero block) and create the initial counter block. */ memset(ctx->gcm_ghash, 0, sizeof (ctx->gcm_ghash)); memset(H, 0, sizeof (ctx->gcm_H)); kfpu_begin(); aes_encrypt_intel(keysched, aes_rounds, (const uint32_t *)H, (uint32_t *)H); gcm_init_htab_avx(ctx->gcm_Htable, H); if (iv_len == 12) { memcpy(cb, iv, 12); cb[12] = 0; cb[13] = 0; cb[14] = 0; cb[15] = 1; /* We need the ICB later. */ memcpy(ctx->gcm_J0, cb, sizeof (ctx->gcm_J0)); } else { /* * Most consumers use 12-byte IVs, so it's OK to use the * original routines for other IV sizes, just avoid nesting * kfpu_begin calls. */ clear_fpu_regs(); kfpu_end(); gcm_format_initial_blocks(iv, iv_len, ctx, block_size, aes_copy_block, aes_xor_block); kfpu_begin(); } /* OpenSSL post-increments the counter, adjust for that. */ gcm_incr_counter_block(ctx); /* Ghash AAD in chunk_size blocks. */ for (bleft = auth_data_len; bleft >= chunk_size; bleft -= chunk_size) { GHASH_AVX(ctx, datap, chunk_size); datap += chunk_size; clear_fpu_regs(); kfpu_end(); kfpu_begin(); } /* Ghash the remainder and handle possible incomplete GCM block. */ if (bleft > 0) { size_t incomp = bleft % block_size; bleft -= incomp; if (bleft > 0) { GHASH_AVX(ctx, datap, bleft); datap += bleft; } if (incomp > 0) { /* Zero pad and hash incomplete last block. */ uint8_t *authp = (uint8_t *)ctx->gcm_tmp; memset(authp, 0, block_size); memcpy(authp, datap, incomp); GHASH_AVX(ctx, authp, block_size); } } clear_fpu_regs(); kfpu_end(); return (CRYPTO_SUCCESS); } #if defined(_KERNEL) static int icp_gcm_avx_set_chunk_size(const char *buf, zfs_kernel_param_t *kp) { unsigned long val; char val_rounded[16]; int error = 0; error = kstrtoul(buf, 0, &val); if (error) return (error); val = (val / GCM_AVX_MIN_DECRYPT_BYTES) * GCM_AVX_MIN_DECRYPT_BYTES; if (val < GCM_AVX_MIN_ENCRYPT_BYTES || val > GCM_AVX_MAX_CHUNK_SIZE) return (-EINVAL); snprintf(val_rounded, 16, "%u", (uint32_t)val); error = param_set_uint(val_rounded, kp); return (error); } module_param_call(icp_gcm_avx_chunk_size, icp_gcm_avx_set_chunk_size, param_get_uint, &gcm_avx_chunk_size, 0644); MODULE_PARM_DESC(icp_gcm_avx_chunk_size, "How many bytes to process while owning the FPU"); #endif /* defined(_KERNEL) */ #endif /* ifdef CAN_USE_GCM_ASM */ diff --git a/module/icp/algs/modes/modes.c b/module/icp/algs/modes/modes.c index 6f6649b3b58b..786a89f10c90 100644 --- a/module/icp/algs/modes/modes.c +++ b/module/icp/algs/modes/modes.c @@ -1,196 +1,186 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License.
* * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2009 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ #include #include #include #include /* * Initialize by setting iov_or_mp to point to the current iovec or mp, * and by setting current_offset to an offset within the current iovec or mp. */ void crypto_init_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset) { offset_t offset; switch (out->cd_format) { case CRYPTO_DATA_RAW: *current_offset = out->cd_offset; break; case CRYPTO_DATA_UIO: { zfs_uio_t *uiop = out->cd_uio; uint_t vec_idx; offset = out->cd_offset; offset = zfs_uio_index_at_offset(uiop, offset, &vec_idx); *current_offset = offset; *iov_or_mp = (void *)(uintptr_t)vec_idx; break; } } /* end switch */ } /* * Get pointers for where in the output to copy a block of encrypted or * decrypted data. The iov_or_mp argument stores a pointer to the current * iovec or mp, and offset stores an offset into the current iovec or mp. */ void crypto_get_ptrs(crypto_data_t *out, void **iov_or_mp, offset_t *current_offset, uint8_t **out_data_1, size_t *out_data_1_len, uint8_t **out_data_2, size_t amt) { offset_t offset; switch (out->cd_format) { case CRYPTO_DATA_RAW: { iovec_t *iov; offset = *current_offset; iov = &out->cd_raw; if ((offset + amt) <= iov->iov_len) { /* one block fits */ *out_data_1 = (uint8_t *)iov->iov_base + offset; *out_data_1_len = amt; *out_data_2 = NULL; *current_offset = offset + amt; } break; } case CRYPTO_DATA_UIO: { zfs_uio_t *uio = out->cd_uio; offset_t offset; uint_t vec_idx; uint8_t *p; uint64_t iov_len; void *iov_base; offset = *current_offset; vec_idx = (uintptr_t)(*iov_or_mp); zfs_uio_iov_at_index(uio, vec_idx, &iov_base, &iov_len); p = (uint8_t *)iov_base + offset; *out_data_1 = p; if (offset + amt <= iov_len) { /* can fit one block into this iov */ *out_data_1_len = amt; *out_data_2 = NULL; *current_offset = offset + amt; } else { /* one block spans two iovecs */ *out_data_1_len = iov_len - offset; if (vec_idx == zfs_uio_iovcnt(uio)) { *out_data_2 = NULL; return; } vec_idx++; zfs_uio_iov_at_index(uio, vec_idx, &iov_base, &iov_len); *out_data_2 = (uint8_t *)iov_base; *current_offset = amt - *out_data_1_len; } *iov_or_mp = (void *)(uintptr_t)vec_idx; break; } } /* end switch */ } void crypto_free_mode_ctx(void *ctx) { common_ctx_t *common_ctx = (common_ctx_t *)ctx; - switch (common_ctx->cc_flags & - (ECB_MODE|CBC_MODE|CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE)) { - case ECB_MODE: - kmem_free(common_ctx, sizeof (ecb_ctx_t)); - break; - - case CBC_MODE: - kmem_free(common_ctx, sizeof (cbc_ctx_t)); - break; - - case CTR_MODE: - kmem_free(common_ctx, sizeof (ctr_ctx_t)); - break; - + switch (common_ctx->cc_flags & (CCM_MODE|GCM_MODE)) { case CCM_MODE: if (((ccm_ctx_t *)ctx)->ccm_pt_buf != NULL) vmem_free(((ccm_ctx_t *)ctx)->ccm_pt_buf, ((ccm_ctx_t *)ctx)->ccm_data_len); kmem_free(ctx, sizeof (ccm_ctx_t)); break; case GCM_MODE: - case GMAC_MODE: gcm_clear_ctx((gcm_ctx_t *)ctx); 
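/* gcm_clear_ctx() zeroed the sensitive fields and freed gcm_Htable and gcm_pt_buf; only the context struct itself remains to be freed. */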
kmem_free(ctx, sizeof (gcm_ctx_t)); + break; + + default: + __builtin_unreachable(); } } /* A memset() the compiler cannot optimize away: the empty asm statement consumes s and clobbers memory. */ static void * explicit_memset(void *s, int c, size_t n) { memset(s, c, n); __asm__ __volatile__("" :: "r"(s) : "memory"); return (s); } /* * Clear sensitive data in the context and free allocated memory. * * ctx->gcm_remainder may contain a plaintext remainder. ctx->gcm_H and * ctx->gcm_Htable contain the hash subkey which protects authentication. * ctx->gcm_pt_buf contains the plaintext result of decryption. * * Although extremely unlikely, ctx->gcm_J0 and ctx->gcm_tmp could be used in * a known-plaintext attack: they consist of the IV and the first and last * counter block, respectively. Whether they should be cleared is debatable. */ void gcm_clear_ctx(gcm_ctx_t *ctx) { explicit_memset(ctx->gcm_remainder, 0, sizeof (ctx->gcm_remainder)); explicit_memset(ctx->gcm_H, 0, sizeof (ctx->gcm_H)); #if defined(CAN_USE_GCM_ASM) if (ctx->gcm_use_avx == B_TRUE) { ASSERT3P(ctx->gcm_Htable, !=, NULL); memset(ctx->gcm_Htable, 0, ctx->gcm_htab_len); kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len); } #endif if (ctx->gcm_pt_buf != NULL) { memset(ctx->gcm_pt_buf, 0, ctx->gcm_pt_buf_len); vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len); } /* Optional */ explicit_memset(ctx->gcm_J0, 0, sizeof (ctx->gcm_J0)); explicit_memset(ctx->gcm_tmp, 0, sizeof (ctx->gcm_tmp)); } diff --git a/module/icp/include/aes/aes_impl.h b/module/icp/include/aes/aes_impl.h index 66eb4a6c8fb6..d26ced58ff1e 100644 --- a/module/icp/include/aes/aes_impl.h +++ b/module/icp/include/aes/aes_impl.h @@ -1,221 +1,216 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2009 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ #ifndef _AES_IMPL_H #define _AES_IMPL_H /* * Common definitions used by AES.
*/ #ifdef __cplusplus extern "C" { #endif #include #include #include /* Similar to sysmacros.h IS_P2ALIGNED, but checks two pointers: */ #define IS_P2ALIGNED2(v, w, a) \ ((((uintptr_t)(v) | (uintptr_t)(w)) & ((uintptr_t)(a) - 1)) == 0) #define AES_BLOCK_LEN 16 /* bytes */ /* Round constant length, in number of 32-bit elements: */ #define RC_LENGTH (5 * ((AES_BLOCK_LEN) / 4 - 2)) #define AES_COPY_BLOCK(src, dst) \ (dst)[0] = (src)[0]; \ (dst)[1] = (src)[1]; \ (dst)[2] = (src)[2]; \ (dst)[3] = (src)[3]; \ (dst)[4] = (src)[4]; \ (dst)[5] = (src)[5]; \ (dst)[6] = (src)[6]; \ (dst)[7] = (src)[7]; \ (dst)[8] = (src)[8]; \ (dst)[9] = (src)[9]; \ (dst)[10] = (src)[10]; \ (dst)[11] = (src)[11]; \ (dst)[12] = (src)[12]; \ (dst)[13] = (src)[13]; \ (dst)[14] = (src)[14]; \ (dst)[15] = (src)[15] #define AES_XOR_BLOCK(src, dst) \ (dst)[0] ^= (src)[0]; \ (dst)[1] ^= (src)[1]; \ (dst)[2] ^= (src)[2]; \ (dst)[3] ^= (src)[3]; \ (dst)[4] ^= (src)[4]; \ (dst)[5] ^= (src)[5]; \ (dst)[6] ^= (src)[6]; \ (dst)[7] ^= (src)[7]; \ (dst)[8] ^= (src)[8]; \ (dst)[9] ^= (src)[9]; \ (dst)[10] ^= (src)[10]; \ (dst)[11] ^= (src)[11]; \ (dst)[12] ^= (src)[12]; \ (dst)[13] ^= (src)[13]; \ (dst)[14] ^= (src)[14]; \ (dst)[15] ^= (src)[15] /* AES key size definitions */ #define AES_MINBITS 128 #define AES_MAXBITS 256 /* AES key schedule may be implemented with 32- or 64-bit elements: */ #define AES_32BIT_KS 32 #define AES_64BIT_KS 64 #define MAX_AES_NR 14 /* Maximum number of rounds */ #define MAX_AES_NB 4 /* Number of columns comprising a state */ typedef union { #ifdef sun4u uint64_t ks64[((MAX_AES_NR) + 1) * (MAX_AES_NB)]; #endif uint32_t ks32[((MAX_AES_NR) + 1) * (MAX_AES_NB)]; } aes_ks_t; typedef struct aes_impl_ops aes_impl_ops_t; /* * The absolute offset of the encr_ks (0) and the nr (504) fields are hard * coded in aesni-gcm-x86_64, so please don't change (or adjust accordingly). */ typedef struct aes_key aes_key_t; struct aes_key { aes_ks_t encr_ks; /* encryption key schedule */ aes_ks_t decr_ks; /* decryption key schedule */ #ifdef __amd64 long double align128; /* Align fields above for Intel AES-NI */ #endif /* __amd64 */ const aes_impl_ops_t *ops; /* ops associated with this schedule */ int nr; /* number of rounds (10, 12, or 14) */ int type; /* key schedule size (32 or 64 bits) */ }; /* * Core AES functions. * ks and keysched are pointers to aes_key_t. * They are declared void* as they are intended to be opaque types. * Use function aes_alloc_keysched() to allocate memory for ks and keysched. */ extern void *aes_alloc_keysched(size_t *size, int kmflag); extern void aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched); extern int aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct); extern int aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt); /* * AES mode functions. * The first 2 functions operate on 16-byte AES blocks. 
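* (The GCM code, for instance, passes the aes_copy_block() and * aes_xor_block() declared below as the copy_block/xor_block callbacks * of gcm_init_ctx().)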
*/ extern void aes_copy_block(uint8_t *in, uint8_t *out); extern void aes_xor_block(uint8_t *data, uint8_t *dst); /* Note: ctx is a pointer to aes_ctx_t defined in modes.h */ extern int aes_encrypt_contiguous_blocks(void *ctx, char *data, size_t length, crypto_data_t *out); extern int aes_decrypt_contiguous_blocks(void *ctx, char *data, size_t length, crypto_data_t *out); /* * The following definitions and declarations are only used by AES FIPS POST */ #ifdef _AES_IMPL typedef enum aes_mech_type { - AES_ECB_MECH_INFO_TYPE, /* SUN_CKM_AES_ECB */ - AES_CBC_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC */ - AES_CBC_PAD_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC_PAD */ - AES_CTR_MECH_INFO_TYPE, /* SUN_CKM_AES_CTR */ AES_CCM_MECH_INFO_TYPE, /* SUN_CKM_AES_CCM */ AES_GCM_MECH_INFO_TYPE, /* SUN_CKM_AES_GCM */ - AES_GMAC_MECH_INFO_TYPE /* SUN_CKM_AES_GMAC */ } aes_mech_type_t; #endif /* _AES_IMPL */ /* * Methods used to define AES implementation * * @aes_gen_f Key generation * @aes_enc_f Function encrypts one block * @aes_dec_f Function decrypts one block * @aes_will_work_f Function tests whether method will function */ typedef void (*aes_generate_f)(aes_key_t *, const uint32_t *, int); typedef void (*aes_encrypt_f)(const uint32_t[], int, const uint32_t[4], uint32_t[4]); typedef void (*aes_decrypt_f)(const uint32_t[], int, const uint32_t[4], uint32_t[4]); typedef boolean_t (*aes_will_work_f)(void); #define AES_IMPL_NAME_MAX (16) struct aes_impl_ops { aes_generate_f generate; aes_encrypt_f encrypt; aes_decrypt_f decrypt; aes_will_work_f is_supported; boolean_t needs_byteswap; char name[AES_IMPL_NAME_MAX]; }; extern const aes_impl_ops_t aes_generic_impl; #if defined(__x86_64) extern const aes_impl_ops_t aes_x86_64_impl; /* These functions are used to execute amd64 instructions for AMD or Intel: */ extern ASMABI int rijndael_key_setup_enc_amd64(uint32_t rk[], const uint32_t cipherKey[], int keyBits); extern ASMABI int rijndael_key_setup_dec_amd64(uint32_t rk[], const uint32_t cipherKey[], int keyBits); extern ASMABI void aes_encrypt_amd64(const uint32_t rk[], int Nr, const uint32_t pt[4], uint32_t ct[4]); extern ASMABI void aes_decrypt_amd64(const uint32_t rk[], int Nr, const uint32_t ct[4], uint32_t pt[4]); #endif #if defined(__x86_64) && defined(HAVE_AES) extern const aes_impl_ops_t aes_aesni_impl; #endif /* * Initializes fastest implementation */ void aes_impl_init(void); /* * Returns optimal allowed AES implementation */ const struct aes_impl_ops *aes_impl_get_ops(void); #ifdef __cplusplus } #endif #endif /* _AES_IMPL_H */ diff --git a/module/icp/include/modes/modes.h b/module/icp/include/modes/modes.h index 950c1115f3e0..daa0335b5c3b 100644 --- a/module/icp/include/modes/modes.h +++ b/module/icp/include/modes/modes.h @@ -1,379 +1,279 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
* If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2009 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ #ifndef _COMMON_CRYPTO_MODES_H #define _COMMON_CRYPTO_MODES_H #ifdef __cplusplus extern "C" { #endif #include #include #include /* * Does the build chain support all instructions needed for the GCM assembler * routines. AVX support should imply AES-NI and PCLMULQDQ, but make sure * anyhow. */ #if defined(__x86_64__) && defined(HAVE_AVX) && \ defined(HAVE_AES) && defined(HAVE_PCLMULQDQ) #define CAN_USE_GCM_ASM extern boolean_t gcm_avx_can_use_movbe; #endif -#define ECB_MODE 0x00000002 -#define CBC_MODE 0x00000004 -#define CTR_MODE 0x00000008 #define CCM_MODE 0x00000010 #define GCM_MODE 0x00000020 -#define GMAC_MODE 0x00000040 /* * cc_keysched: Pointer to key schedule. * * cc_keysched_len: Length of the key schedule. * * cc_remainder: This is for residual data, i.e. data that can't * be processed because there are too few bytes. * Must wait until more data arrives. * * cc_remainder_len: Number of bytes in cc_remainder. * * cc_iv: Scratch buffer that sometimes contains the IV. * * cc_lastp: Pointer to previous block of ciphertext. * * cc_copy_to: Pointer to where encrypted residual data needs * to be copied. * * cc_flags: PROVIDER_OWNS_KEY_SCHEDULE * When a context is freed, it is necessary * to know whether the key schedule was allocated * by the caller, or internally, e.g. an init routine. * If allocated by the latter, then it needs to be freed. * - * ECB_MODE, CBC_MODE, CTR_MODE, or CCM_MODE + * CCM_MODE */ struct common_ctx { void *cc_keysched; size_t cc_keysched_len; uint64_t cc_iv[2]; uint64_t cc_remainder[2]; size_t cc_remainder_len; uint8_t *cc_lastp; uint8_t *cc_copy_to; uint32_t cc_flags; }; typedef struct common_ctx common_ctx_t; -typedef struct ecb_ctx { - struct common_ctx ecb_common; - uint64_t ecb_lastblock[2]; -} ecb_ctx_t; - -#define ecb_keysched ecb_common.cc_keysched -#define ecb_keysched_len ecb_common.cc_keysched_len -#define ecb_iv ecb_common.cc_iv -#define ecb_remainder ecb_common.cc_remainder -#define ecb_remainder_len ecb_common.cc_remainder_len -#define ecb_lastp ecb_common.cc_lastp -#define ecb_copy_to ecb_common.cc_copy_to -#define ecb_flags ecb_common.cc_flags - -typedef struct cbc_ctx { - struct common_ctx cbc_common; - uint64_t cbc_lastblock[2]; -} cbc_ctx_t; - -#define cbc_keysched cbc_common.cc_keysched -#define cbc_keysched_len cbc_common.cc_keysched_len -#define cbc_iv cbc_common.cc_iv -#define cbc_remainder cbc_common.cc_remainder -#define cbc_remainder_len cbc_common.cc_remainder_len -#define cbc_lastp cbc_common.cc_lastp -#define cbc_copy_to cbc_common.cc_copy_to -#define cbc_flags cbc_common.cc_flags - -/* - * ctr_lower_mask Bit-mask for lower 8 bytes of counter block. - * ctr_upper_mask Bit-mask for upper 8 bytes of counter block. - */ -typedef struct ctr_ctx { - struct common_ctx ctr_common; - uint64_t ctr_lower_mask; - uint64_t ctr_upper_mask; - uint32_t ctr_tmp[4]; -} ctr_ctx_t; - -/* - * ctr_cb Counter block. 
- */ -#define ctr_keysched ctr_common.cc_keysched -#define ctr_keysched_len ctr_common.cc_keysched_len -#define ctr_cb ctr_common.cc_iv -#define ctr_remainder ctr_common.cc_remainder -#define ctr_remainder_len ctr_common.cc_remainder_len -#define ctr_lastp ctr_common.cc_lastp -#define ctr_copy_to ctr_common.cc_copy_to -#define ctr_flags ctr_common.cc_flags - /* * * ccm_mac_len: Stores length of the MAC in CCM mode. * ccm_mac_buf: Stores the intermediate value for MAC in CCM encrypt. * In CCM decrypt, stores the input MAC value. * ccm_data_len: Length of the plaintext for CCM mode encrypt, or * length of the ciphertext for CCM mode decrypt. * ccm_processed_data_len: * Length of processed plaintext in CCM mode encrypt, * or length of processed ciphertext for CCM mode decrypt. * ccm_processed_mac_len: * Length of MAC data accumulated in CCM mode decrypt. * * ccm_pt_buf: Only used in CCM mode decrypt. It stores the * decrypted plaintext to be returned when * MAC verification succeeds in decrypt_final. * Memory for this should be allocated in the AES module. * */ typedef struct ccm_ctx { struct common_ctx ccm_common; uint32_t ccm_tmp[4]; size_t ccm_mac_len; uint64_t ccm_mac_buf[2]; size_t ccm_data_len; size_t ccm_processed_data_len; size_t ccm_processed_mac_len; uint8_t *ccm_pt_buf; uint64_t ccm_mac_input_buf[2]; uint64_t ccm_counter_mask; } ccm_ctx_t; #define ccm_keysched ccm_common.cc_keysched #define ccm_keysched_len ccm_common.cc_keysched_len #define ccm_cb ccm_common.cc_iv #define ccm_remainder ccm_common.cc_remainder #define ccm_remainder_len ccm_common.cc_remainder_len #define ccm_lastp ccm_common.cc_lastp #define ccm_copy_to ccm_common.cc_copy_to #define ccm_flags ccm_common.cc_flags /* * gcm_tag_len: Length of authentication tag. * * gcm_ghash: Stores output from the GHASH function. * * gcm_processed_data_len: * Length of processed plaintext (encrypt) or * length of processed ciphertext (decrypt). * * gcm_pt_buf: Stores the decrypted plaintext returned by * decrypt_final when the computed authentication * tag matches the user supplied tag. * * gcm_pt_buf_len: Length of the plaintext buffer. * * gcm_H: Subkey. * * gcm_Htable: Pre-computed and pre-shifted H, H^2, ... H^6 for the * Karatsuba Algorithm in host byte order. * * gcm_J0: Pre-counter block generated from the IV. * * gcm_len_a_len_c: 64-bit representations of the bit lengths of * AAD and ciphertext. */ typedef struct gcm_ctx { struct common_ctx gcm_common; size_t gcm_tag_len; size_t gcm_processed_data_len; size_t gcm_pt_buf_len; uint32_t gcm_tmp[4]; /* * The offset of gcm_Htable relative to gcm_ghash, (32), is hard coded * in aesni-gcm-x86_64.S, so please don't change (or adjust there). 
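* (gcm_ghash[2] and gcm_H[2] each occupy 16 bytes, which is what places * gcm_Htable 32 bytes past gcm_ghash.)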
*/ uint64_t gcm_ghash[2]; uint64_t gcm_H[2]; #ifdef CAN_USE_GCM_ASM uint64_t *gcm_Htable; size_t gcm_htab_len; #endif uint64_t gcm_J0[2]; uint64_t gcm_len_a_len_c[2]; uint8_t *gcm_pt_buf; #ifdef CAN_USE_GCM_ASM boolean_t gcm_use_avx; #endif } gcm_ctx_t; #define gcm_keysched gcm_common.cc_keysched #define gcm_keysched_len gcm_common.cc_keysched_len #define gcm_cb gcm_common.cc_iv #define gcm_remainder gcm_common.cc_remainder #define gcm_remainder_len gcm_common.cc_remainder_len #define gcm_lastp gcm_common.cc_lastp #define gcm_copy_to gcm_common.cc_copy_to #define gcm_flags gcm_common.cc_flags -#define AES_GMAC_IV_LEN 12 -#define AES_GMAC_TAG_BITS 128 - void gcm_clear_ctx(gcm_ctx_t *ctx); typedef struct aes_ctx { union { - ecb_ctx_t acu_ecb; - cbc_ctx_t acu_cbc; - ctr_ctx_t acu_ctr; ccm_ctx_t acu_ccm; gcm_ctx_t acu_gcm; } acu; } aes_ctx_t; -#define ac_flags acu.acu_ecb.ecb_common.cc_flags -#define ac_remainder_len acu.acu_ecb.ecb_common.cc_remainder_len -#define ac_keysched acu.acu_ecb.ecb_common.cc_keysched -#define ac_keysched_len acu.acu_ecb.ecb_common.cc_keysched_len -#define ac_iv acu.acu_ecb.ecb_common.cc_iv -#define ac_lastp acu.acu_ecb.ecb_common.cc_lastp +#define ac_flags acu.acu_ccm.ccm_common.cc_flags +#define ac_remainder_len acu.acu_ccm.ccm_common.cc_remainder_len +#define ac_keysched acu.acu_ccm.ccm_common.cc_keysched +#define ac_keysched_len acu.acu_ccm.ccm_common.cc_keysched_len +#define ac_iv acu.acu_ccm.ccm_common.cc_iv +#define ac_lastp acu.acu_ccm.ccm_common.cc_lastp #define ac_pt_buf acu.acu_ccm.ccm_pt_buf #define ac_mac_len acu.acu_ccm.ccm_mac_len #define ac_data_len acu.acu_ccm.ccm_data_len #define ac_processed_mac_len acu.acu_ccm.ccm_processed_mac_len #define ac_processed_data_len acu.acu_ccm.ccm_processed_data_len #define ac_tag_len acu.acu_gcm.gcm_tag_len -extern int ecb_cipher_contiguous_blocks(ecb_ctx_t *, char *, size_t, - crypto_data_t *, size_t, int (*cipher)(const void *, const uint8_t *, - uint8_t *)); - -extern int cbc_encrypt_contiguous_blocks(cbc_ctx_t *, char *, size_t, - crypto_data_t *, size_t, - int (*encrypt)(const void *, const uint8_t *, uint8_t *), - void (*copy_block)(uint8_t *, uint8_t *), - void (*xor_block)(uint8_t *, uint8_t *)); - -extern int cbc_decrypt_contiguous_blocks(cbc_ctx_t *, char *, size_t, - crypto_data_t *, size_t, - int (*decrypt)(const void *, const uint8_t *, uint8_t *), - void (*copy_block)(uint8_t *, uint8_t *), - void (*xor_block)(uint8_t *, uint8_t *)); - -extern int ctr_mode_contiguous_blocks(ctr_ctx_t *, char *, size_t, - crypto_data_t *, size_t, - int (*cipher)(const void *, const uint8_t *, uint8_t *), - void (*xor_block)(uint8_t *, uint8_t *)); - extern int ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *, char *, size_t, crypto_data_t *, size_t, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)); extern int ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *, char *, size_t, crypto_data_t *, size_t, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)); extern int gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *, char *, size_t, crypto_data_t *, size_t, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)); extern int gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *, char *, size_t, crypto_data_t *, size_t, int (*encrypt_block)(const void *, const 
uint8_t *, uint8_t *), void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)); int ccm_encrypt_final(ccm_ctx_t *, crypto_data_t *, size_t, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)); int gcm_encrypt_final(gcm_ctx_t *, crypto_data_t *, size_t, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)); extern int ccm_decrypt_final(ccm_ctx_t *, crypto_data_t *, size_t, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)); extern int gcm_decrypt_final(gcm_ctx_t *, crypto_data_t *, size_t, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)); -extern int ctr_mode_final(ctr_ctx_t *, crypto_data_t *, - int (*encrypt_block)(const void *, const uint8_t *, uint8_t *)); - -extern int cbc_init_ctx(cbc_ctx_t *, char *, size_t, size_t, - void (*copy_block)(uint8_t *, uint64_t *)); - -extern int ctr_init_ctx(ctr_ctx_t *, ulong_t, uint8_t *, - void (*copy_block)(uint8_t *, uint8_t *)); - extern int ccm_init_ctx(ccm_ctx_t *, char *, int, boolean_t, size_t, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)); extern int gcm_init_ctx(gcm_ctx_t *, char *, size_t, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), void (*copy_block)(uint8_t *, uint8_t *), void (*xor_block)(uint8_t *, uint8_t *)); -extern int gmac_init_ctx(gcm_ctx_t *, char *, size_t, - int (*encrypt_block)(const void *, const uint8_t *, uint8_t *), - void (*copy_block)(uint8_t *, uint8_t *), - void (*xor_block)(uint8_t *, uint8_t *)); - extern void calculate_ccm_mac(ccm_ctx_t *, uint8_t *, int (*encrypt_block)(const void *, const uint8_t *, uint8_t *)); extern void gcm_mul(uint64_t *, uint64_t *, uint64_t *); extern void crypto_init_ptrs(crypto_data_t *, void **, offset_t *); extern void crypto_get_ptrs(crypto_data_t *, void **, offset_t *, uint8_t **, size_t *, uint8_t **, size_t); -extern void *ecb_alloc_ctx(int); -extern void *cbc_alloc_ctx(int); -extern void *ctr_alloc_ctx(int); extern void *ccm_alloc_ctx(int); extern void *gcm_alloc_ctx(int); -extern void *gmac_alloc_ctx(int); extern void crypto_free_mode_ctx(void *); #ifdef __cplusplus } #endif #endif /* _COMMON_CRYPTO_MODES_H */ diff --git a/module/icp/io/aes.c b/module/icp/io/aes.c index 522c436497bc..a4ef1716710e 100644 --- a/module/icp/io/aes.c +++ b/module/icp/io/aes.c @@ -1,1323 +1,987 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 
*/ /* * AES provider for the Kernel Cryptographic Framework (KCF) */ #include #include #include #include #include #include #define _AES_IMPL #include #include /* * Mechanism info structure passed to KCF during registration. */ static const crypto_mech_info_t aes_mech_info_tab[] = { - /* AES_ECB */ - {SUN_CKM_AES_ECB, AES_ECB_MECH_INFO_TYPE, - CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | - CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC}, - /* AES_CBC */ - {SUN_CKM_AES_CBC, AES_CBC_MECH_INFO_TYPE, - CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | - CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC}, - /* AES_CTR */ - {SUN_CKM_AES_CTR, AES_CTR_MECH_INFO_TYPE, - CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | - CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC}, /* AES_CCM */ {SUN_CKM_AES_CCM, AES_CCM_MECH_INFO_TYPE, CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC}, /* AES_GCM */ {SUN_CKM_AES_GCM, AES_GCM_MECH_INFO_TYPE, CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC}, - /* AES_GMAC */ - {SUN_CKM_AES_GMAC, AES_GMAC_MECH_INFO_TYPE, - CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC | - CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC | - CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC}, }; static int aes_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t); static int aes_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t); static int aes_common_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t, boolean_t); static int aes_common_init_ctx(aes_ctx_t *, crypto_spi_ctx_template_t *, crypto_mechanism_t *, crypto_key_t *, int, boolean_t); static int aes_encrypt_final(crypto_ctx_t *, crypto_data_t *); static int aes_decrypt_final(crypto_ctx_t *, crypto_data_t *); static int aes_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *); static int aes_encrypt_update(crypto_ctx_t *, crypto_data_t *, crypto_data_t *); static int aes_encrypt_atomic(crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t); static int aes_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *); static int aes_decrypt_update(crypto_ctx_t *, crypto_data_t *, crypto_data_t *); static int aes_decrypt_atomic(crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t); static const crypto_cipher_ops_t aes_cipher_ops = { .encrypt_init = aes_encrypt_init, .encrypt = aes_encrypt, .encrypt_update = aes_encrypt_update, .encrypt_final = aes_encrypt_final, .encrypt_atomic = aes_encrypt_atomic, .decrypt_init = aes_decrypt_init, .decrypt = aes_decrypt, .decrypt_update = aes_decrypt_update, .decrypt_final = aes_decrypt_final, .decrypt_atomic = aes_decrypt_atomic }; -static int aes_mac_atomic(crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, - crypto_data_t *, crypto_spi_ctx_template_t); -static int aes_mac_verify_atomic(crypto_mechanism_t *, crypto_key_t *, - crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t); - -static const crypto_mac_ops_t aes_mac_ops = { - .mac_init = NULL, - .mac = NULL, - .mac_update = NULL, - .mac_final = NULL, - .mac_atomic = aes_mac_atomic, - .mac_verify_atomic = aes_mac_verify_atomic -}; - static int aes_create_ctx_template(crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *, size_t *); static int aes_free_context(crypto_ctx_t *); static const crypto_ctx_ops_t aes_ctx_ops = { .create_ctx_template = aes_create_ctx_template, 
.free_context = aes_free_context }; static const crypto_ops_t aes_crypto_ops = { NULL, &aes_cipher_ops, - &aes_mac_ops, + NULL, &aes_ctx_ops, }; static const crypto_provider_info_t aes_prov_info = { "AES Software Provider", &aes_crypto_ops, sizeof (aes_mech_info_tab) / sizeof (crypto_mech_info_t), aes_mech_info_tab }; static crypto_kcf_provider_handle_t aes_prov_handle = 0; -static crypto_data_t null_crypto_data = { CRYPTO_DATA_RAW }; int aes_mod_init(void) { /* Determine the fastest available implementation. */ aes_impl_init(); gcm_impl_init(); /* Register with KCF. If the registration fails, remove the module. */ if (crypto_register_provider(&aes_prov_info, &aes_prov_handle)) return (EACCES); return (0); } int aes_mod_fini(void) { /* Unregister from KCF if module is registered */ if (aes_prov_handle != 0) { if (crypto_unregister_provider(aes_prov_handle)) return (EBUSY); aes_prov_handle = 0; } return (0); } static int aes_check_mech_param(crypto_mechanism_t *mechanism, aes_ctx_t **ctx) { void *p = NULL; boolean_t param_required = B_TRUE; size_t param_len; void *(*alloc_fun)(int); int rv = CRYPTO_SUCCESS; switch (mechanism->cm_type) { - case AES_ECB_MECH_INFO_TYPE: - param_required = B_FALSE; - alloc_fun = ecb_alloc_ctx; - break; - case AES_CBC_MECH_INFO_TYPE: - param_len = AES_BLOCK_LEN; - alloc_fun = cbc_alloc_ctx; - break; - case AES_CTR_MECH_INFO_TYPE: - param_len = sizeof (CK_AES_CTR_PARAMS); - alloc_fun = ctr_alloc_ctx; - break; case AES_CCM_MECH_INFO_TYPE: param_len = sizeof (CK_AES_CCM_PARAMS); alloc_fun = ccm_alloc_ctx; break; case AES_GCM_MECH_INFO_TYPE: param_len = sizeof (CK_AES_GCM_PARAMS); alloc_fun = gcm_alloc_ctx; break; - case AES_GMAC_MECH_INFO_TYPE: - param_len = sizeof (CK_AES_GMAC_PARAMS); - alloc_fun = gmac_alloc_ctx; - break; default: - rv = CRYPTO_MECHANISM_INVALID; - return (rv); + __builtin_unreachable(); } if (param_required && mechanism->cm_param != NULL && mechanism->cm_param_len != param_len) { rv = CRYPTO_MECHANISM_PARAM_INVALID; } if (ctx != NULL) { p = (alloc_fun)(KM_SLEEP); *ctx = p; } return (rv); } /* * Initialize key schedules for AES */ static int init_keysched(crypto_key_t *key, void *newbie) { if (key->ck_length < AES_MINBITS || key->ck_length > AES_MAXBITS) { return (CRYPTO_KEY_SIZE_RANGE); } /* key length must be either 128, 192, or 256 */ if ((key->ck_length & 63) != 0) return (CRYPTO_KEY_SIZE_RANGE); aes_init_keysched(key->ck_data, key->ck_length, newbie); return (CRYPTO_SUCCESS); } static int aes_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_spi_ctx_template_t template) { return (aes_common_init(ctx, mechanism, key, template, B_TRUE)); } static int aes_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_spi_ctx_template_t template) { return (aes_common_init(ctx, mechanism, key, template, B_FALSE)); } /* * KCF software provider encrypt entry points. 
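* * A typical single-part flow, as implemented below: aes_encrypt_init() * allocates and keys the mode context, and aes_encrypt() performs * aes_encrypt_update() plus the mode-specific *_encrypt_final() before * freeing the context. The *_atomic() entry points combine these steps * into a single call.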
/*
 * KCF software provider encrypt entry points.
 */
static int
aes_common_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t template,
    boolean_t is_encrypt_init)
{
	aes_ctx_t *aes_ctx;
	int rv;

	if ((rv = aes_check_mech_param(mechanism, &aes_ctx)) != CRYPTO_SUCCESS)
		return (rv);

	rv = aes_common_init_ctx(aes_ctx, template, mechanism, key, KM_SLEEP,
	    is_encrypt_init);
	if (rv != CRYPTO_SUCCESS) {
		crypto_free_mode_ctx(aes_ctx);
		return (rv);
	}

	ctx->cc_provider_private = aes_ctx;

	return (CRYPTO_SUCCESS);
}

-static void
-aes_copy_block64(uint8_t *in, uint64_t *out)
-{
-	if (IS_P2ALIGNED(in, sizeof (uint64_t))) {
-		/* LINTED: pointer alignment */
-		out[0] = *(uint64_t *)&in[0];
-		/* LINTED: pointer alignment */
-		out[1] = *(uint64_t *)&in[8];
-	} else {
-		uint8_t *iv8 = (uint8_t *)&out[0];
-
-		AES_COPY_BLOCK(in, iv8);
-	}
-}
-
-
static int
aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext)
{
	int ret = CRYPTO_FAILED;

	aes_ctx_t *aes_ctx;
	size_t saved_length, saved_offset, length_needed;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

-	/*
-	 * For block ciphers, plaintext must be a multiple of AES block size.
-	 * This test is only valid for ciphers whose blocksize is a power of 2.
-	 */
-	if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE))
-	    == 0) && (plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
-		return (CRYPTO_DATA_LEN_RANGE);
-
	ASSERT(ciphertext != NULL);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following case.
	 */
-	switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) {
+	switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE)) {
	case CCM_MODE:
		length_needed = plaintext->cd_length + aes_ctx->ac_mac_len;
		break;
	case GCM_MODE:
		length_needed = plaintext->cd_length + aes_ctx->ac_tag_len;
		break;
-	case GMAC_MODE:
-		if (plaintext->cd_length != 0)
-			return (CRYPTO_ARGUMENTS_BAD);
-
-		length_needed = aes_ctx->ac_tag_len;
-		break;
	default:
-		length_needed = plaintext->cd_length;
+		__builtin_unreachable();
	}

	if (ciphertext->cd_length < length_needed) {
		ciphertext->cd_length = length_needed;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	saved_length = ciphertext->cd_length;
	saved_offset = ciphertext->cd_offset;

	/*
	 * Do an update on the specified input data.
	 */
	ret = aes_encrypt_update(ctx, plaintext, ciphertext);
	if (ret != CRYPTO_SUCCESS) {
		return (ret);
	}

	/*
	 * For CCM mode, aes_ccm_encrypt_final() will take care of any
	 * left-over unprocessed data, and compute the MAC
	 */
	if (aes_ctx->ac_flags & CCM_MODE) {
		/*
		 * ccm_encrypt_final() will compute the MAC and append
		 * it to existing ciphertext. So, need to adjust the left over
		 * length value accordingly
		 */

		/* order of following 2 lines MUST not be reversed */
		ciphertext->cd_offset = ciphertext->cd_length;
		ciphertext->cd_length = saved_length - ciphertext->cd_length;
		ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, ciphertext,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}

		if (plaintext != ciphertext) {
			ciphertext->cd_length =
			    ciphertext->cd_offset - saved_offset;
		}
		ciphertext->cd_offset = saved_offset;
-	} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
+	} else if (aes_ctx->ac_flags & GCM_MODE) {
		/*
		 * gcm_encrypt_final() will compute the MAC and append
		 * it to existing ciphertext. So, need to adjust the left over
		 * length value accordingly
		 */

		/* order of following 2 lines MUST not be reversed */
		ciphertext->cd_offset = ciphertext->cd_length;
		ciphertext->cd_length = saved_length - ciphertext->cd_length;
		ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, ciphertext,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}

		if (plaintext != ciphertext) {
			ciphertext->cd_length =
			    ciphertext->cd_offset - saved_offset;
		}
		ciphertext->cd_offset = saved_offset;
	}

	ASSERT(aes_ctx->ac_remainder_len == 0);
	(void) aes_free_context(ctx);

	return (ret);
}

static int
aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext)
{
	int ret = CRYPTO_FAILED;

	aes_ctx_t *aes_ctx;
	off_t saved_offset;
	size_t saved_length, length_needed;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

-	/*
-	 * For block ciphers, plaintext must be a multiple of AES block size.
-	 * This test is only valid for ciphers whose blocksize is a power of 2.
-	 */
-	if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE))
-	    == 0) && (ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0) {
-		return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
-	}
-
	ASSERT(plaintext != NULL);

	/*
	 * Return length needed to store the output.
	 * Do not destroy context when plaintext buffer is too small.
	 *
	 * CCM:  plaintext is MAC len smaller than cipher text
	 * GCM:  plaintext is TAG len smaller than cipher text
-	 * GMAC: plaintext length must be zero
	 */
-	switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) {
+	switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE)) {
	case CCM_MODE:
		length_needed = aes_ctx->ac_processed_data_len;
		break;
	case GCM_MODE:
		length_needed = ciphertext->cd_length - aes_ctx->ac_tag_len;
		break;
-	case GMAC_MODE:
-		if (plaintext->cd_length != 0)
-			return (CRYPTO_ARGUMENTS_BAD);
-
-		length_needed = 0;
-		break;
	default:
-		length_needed = ciphertext->cd_length;
+		__builtin_unreachable();
	}

	if (plaintext->cd_length < length_needed) {
		plaintext->cd_length = length_needed;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	saved_offset = plaintext->cd_offset;
	saved_length = plaintext->cd_length;

	/*
	 * Do an update on the specified input data.
	 */
	ret = aes_decrypt_update(ctx, ciphertext, plaintext);
	if (ret != CRYPTO_SUCCESS) {
		goto cleanup;
	}

	if (aes_ctx->ac_flags & CCM_MODE) {
		ASSERT(aes_ctx->ac_processed_data_len == aes_ctx->ac_data_len);
		ASSERT(aes_ctx->ac_processed_mac_len == aes_ctx->ac_mac_len);

		/* order of following 2 lines MUST not be reversed */
		plaintext->cd_offset = plaintext->cd_length;
		plaintext->cd_length = saved_length - plaintext->cd_length;

		ret = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, plaintext,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		if (ret == CRYPTO_SUCCESS) {
			if (plaintext != ciphertext) {
				plaintext->cd_length =
				    plaintext->cd_offset - saved_offset;
			}
		} else {
			plaintext->cd_length = saved_length;
		}

		plaintext->cd_offset = saved_offset;
-	} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
+	} else if (aes_ctx->ac_flags & GCM_MODE) {
		/* order of following 2 lines MUST not be reversed */
		plaintext->cd_offset = plaintext->cd_length;
		plaintext->cd_length = saved_length - plaintext->cd_length;

		ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, plaintext,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		if (ret == CRYPTO_SUCCESS) {
			if (plaintext != ciphertext) {
				plaintext->cd_length =
				    plaintext->cd_offset - saved_offset;
			}
		} else {
			plaintext->cd_length = saved_length;
		}

		plaintext->cd_offset = saved_offset;
	}

	ASSERT(aes_ctx->ac_remainder_len == 0);

cleanup:
	(void) aes_free_context(ctx);

	return (ret);
}
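The CRYPTO_BUFFER_TOO_SMALL handling above doubles as a size-query protocol: when the output buffer is short, the provider writes the required size into cd_length and deliberately leaves the context alive. A minimal sketch of the resulting two-call pattern; the wrapper name and its allocation policy are hypothetical, and ownership of the allocated ciphertext buffer is left to the caller:

/*
 * Hypothetical probe-then-commit caller for the single-part entry
 * point above.
 */
static int
aes_encrypt_probe_then_commit(crypto_ctx_t *ctx, crypto_data_t *pt)
{
	crypto_data_t ct = { .cd_format = CRYPTO_DATA_RAW };
	int rv;

	/* Zero-length probe: CCM/GCM output is always larger than this. */
	rv = aes_encrypt(ctx, pt, &ct);
	if (rv != CRYPTO_BUFFER_TOO_SMALL)
		return (rv);

	/* The provider stored the exact length it needs in ct.cd_length. */
	ct.cd_raw.iov_base = kmem_alloc(ct.cd_length, KM_SLEEP);
	ct.cd_raw.iov_len = ct.cd_length;

	/* The context survived the probe, so the retry needs no re-init. */
	return (aes_encrypt(ctx, pt, &ct));
}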
static int
aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext)
{
	off_t saved_offset;
	size_t saved_length, out_len;
	int ret = CRYPTO_SUCCESS;
	aes_ctx_t *aes_ctx;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	ASSERT(ciphertext != NULL);

	/* compute number of bytes that will hold the ciphertext */
	out_len = aes_ctx->ac_remainder_len;
	out_len += plaintext->cd_length;
	out_len &= ~(AES_BLOCK_LEN - 1);

	/* return length needed to store the output */
	if (ciphertext->cd_length < out_len) {
		ciphertext->cd_length = out_len;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	saved_offset = ciphertext->cd_offset;
	saved_length = ciphertext->cd_length;

	/*
	 * Do the AES update on the specified input data.
	 */
	switch (plaintext->cd_format) {
	case CRYPTO_DATA_RAW:
		ret = crypto_update_iov(ctx->cc_provider_private,
		    plaintext, ciphertext, aes_encrypt_contiguous_blocks);
		break;
	case CRYPTO_DATA_UIO:
		ret = crypto_update_uio(ctx->cc_provider_private,
		    plaintext, ciphertext, aes_encrypt_contiguous_blocks);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

-	/*
-	 * Since AES counter mode is a stream cipher, we call
-	 * ctr_mode_final() to pick up any remaining bytes.
-	 * It is an internal function that does not destroy
-	 * the context like *normal* final routines.
-	 */
-	if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
-		ret = ctr_mode_final((ctr_ctx_t *)aes_ctx,
-		    ciphertext, aes_encrypt_block);
-	}
-
	if (ret == CRYPTO_SUCCESS) {
		if (plaintext != ciphertext)
			ciphertext->cd_length =
			    ciphertext->cd_offset - saved_offset;
	} else {
		ciphertext->cd_length = saved_length;
	}

	ciphertext->cd_offset = saved_offset;

	return (ret);
}

static int
aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext)
{
	off_t saved_offset;
-	size_t saved_length, out_len;
+	size_t saved_length;
	int ret = CRYPTO_SUCCESS;
-	aes_ctx_t *aes_ctx;

	ASSERT(ctx->cc_provider_private != NULL);
-	aes_ctx = ctx->cc_provider_private;

	ASSERT(plaintext != NULL);

-	/*
-	 * Compute number of bytes that will hold the plaintext.
-	 * This is not necessary for CCM, GCM, and GMAC since these
-	 * mechanisms never return plaintext for update operations.
-	 */
-	if ((aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) == 0) {
-		out_len = aes_ctx->ac_remainder_len;
-		out_len += ciphertext->cd_length;
-		out_len &= ~(AES_BLOCK_LEN - 1);
-
-		/* return length needed to store the output */
-		if (plaintext->cd_length < out_len) {
-			plaintext->cd_length = out_len;
-			return (CRYPTO_BUFFER_TOO_SMALL);
-		}
-	}
-
	saved_offset = plaintext->cd_offset;
	saved_length = plaintext->cd_length;

	/*
	 * Do the AES update on the specified input data.
	 */
	switch (ciphertext->cd_format) {
	case CRYPTO_DATA_RAW:
		ret = crypto_update_iov(ctx->cc_provider_private,
		    ciphertext, plaintext, aes_decrypt_contiguous_blocks);
		break;
	case CRYPTO_DATA_UIO:
		ret = crypto_update_uio(ctx->cc_provider_private,
		    ciphertext, plaintext, aes_decrypt_contiguous_blocks);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

-	/*
-	 * Since AES counter mode is a stream cipher, we call
-	 * ctr_mode_final() to pick up any remaining bytes.
-	 * It is an internal function that does not destroy
-	 * the context like *normal* final routines.
-	 */
-	if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
-		ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, plaintext,
-		    aes_encrypt_block);
-		if (ret == CRYPTO_DATA_LEN_RANGE)
-			ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
-	}
-
	if (ret == CRYPTO_SUCCESS) {
		if (ciphertext != plaintext)
			plaintext->cd_length =
			    plaintext->cd_offset - saved_offset;
	} else {
		plaintext->cd_length = saved_length;
	}

	plaintext->cd_offset = saved_offset;

	return (ret);
}

static int
aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data)
{
	aes_ctx_t *aes_ctx;
	int ret;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	if (data->cd_format != CRYPTO_DATA_RAW &&
	    data->cd_format != CRYPTO_DATA_UIO) {
		return (CRYPTO_ARGUMENTS_BAD);
	}

-	if (aes_ctx->ac_flags & CTR_MODE) {
-		if (aes_ctx->ac_remainder_len > 0) {
-			ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, data,
-			    aes_encrypt_block);
-			if (ret != CRYPTO_SUCCESS)
-				return (ret);
-		}
-	} else if (aes_ctx->ac_flags & CCM_MODE) {
+	if (aes_ctx->ac_flags & CCM_MODE) {
		ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, data,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}
-	} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
+	} else if (aes_ctx->ac_flags & GCM_MODE) {
		size_t saved_offset = data->cd_offset;

		ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, data,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}
		data->cd_length = data->cd_offset - saved_offset;
		data->cd_offset = saved_offset;
-	} else {
-		/*
-		 * There must be no unprocessed plaintext.
-		 * This happens if the length of the last data is
-		 * not a multiple of the AES block length.
-		 */
-		if (aes_ctx->ac_remainder_len > 0) {
-			return (CRYPTO_DATA_LEN_RANGE);
-		}
-		data->cd_length = 0;
	}

	(void) aes_free_context(ctx);

	return (CRYPTO_SUCCESS);
}

static int
aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data)
{
	aes_ctx_t *aes_ctx;
	int ret;
	off_t saved_offset;
	size_t saved_length;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	if (data->cd_format != CRYPTO_DATA_RAW &&
	    data->cd_format != CRYPTO_DATA_UIO) {
		return (CRYPTO_ARGUMENTS_BAD);
	}

	/*
	 * There must be no unprocessed ciphertext.
	 * This happens if the length of the last ciphertext is
	 * not a multiple of the AES block length.
	 */
-	if (aes_ctx->ac_remainder_len > 0) {
-		if ((aes_ctx->ac_flags & CTR_MODE) == 0)
-			return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
-		else {
-			ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, data,
-			    aes_encrypt_block);
-			if (ret == CRYPTO_DATA_LEN_RANGE)
-				ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
-			if (ret != CRYPTO_SUCCESS)
-				return (ret);
-		}
-	}
+	if (aes_ctx->ac_remainder_len > 0)
+		return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);

	if (aes_ctx->ac_flags & CCM_MODE) {
		/*
		 * This is where all the plaintext is returned, make sure
		 * the plaintext buffer is big enough
		 */
		size_t pt_len = aes_ctx->ac_data_len;
		if (data->cd_length < pt_len) {
			data->cd_length = pt_len;
			return (CRYPTO_BUFFER_TOO_SMALL);
		}

		ASSERT(aes_ctx->ac_processed_data_len == pt_len);
		ASSERT(aes_ctx->ac_processed_mac_len == aes_ctx->ac_mac_len);
		saved_offset = data->cd_offset;
		saved_length = data->cd_length;
		ret = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, data,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		if (ret == CRYPTO_SUCCESS) {
			data->cd_length = data->cd_offset - saved_offset;
		} else {
			data->cd_length = saved_length;
		}

		data->cd_offset = saved_offset;
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}
-	} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
+	} else if (aes_ctx->ac_flags & GCM_MODE) {
		/*
		 * This is where all the plaintext is returned, make sure
		 * the plaintext buffer is big enough
		 */
		gcm_ctx_t *ctx = (gcm_ctx_t *)aes_ctx;
		size_t pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;

		if (data->cd_length < pt_len) {
			data->cd_length = pt_len;
			return (CRYPTO_BUFFER_TOO_SMALL);
		}

		saved_offset = data->cd_offset;
		saved_length = data->cd_length;
		ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, data,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		if (ret == CRYPTO_SUCCESS) {
			data->cd_length = data->cd_offset - saved_offset;
		} else {
			data->cd_length = saved_length;
		}

		data->cd_offset = saved_offset;
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}
	}

-	if ((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE)) == 0) {
-		data->cd_length = 0;
-	}
-
	(void) aes_free_context(ctx);

	return (CRYPTO_SUCCESS);
}
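The *_final entry points close out the multi-part sequence that begins with aes_encrypt_init()/aes_decrypt_init(). A condensed sketch of the whole flow for the two remaining modes; the wrapper is hypothetical, and error paths that would need an explicit aes_free_context() are elided for brevity:

/*
 * Hypothetical driver for the multi-part encrypt sequence above.
 */
static int
encrypt_multipart(crypto_ctx_t *ctx, crypto_mechanism_t *mech,
    crypto_key_t *key, crypto_data_t *pt, crypto_data_t *ct)
{
	int rv;

	/* Allocates and initializes the CCM or GCM mode context. */
	if ((rv = aes_encrypt_init(ctx, mech, key, NULL)) != CRYPTO_SUCCESS)
		return (rv);

	/* May be called repeatedly; only whole blocks are emitted. */
	if ((rv = aes_encrypt_update(ctx, pt, ct)) != CRYPTO_SUCCESS)
		return (rv);

	/* Flushes leftovers, appends the MAC/tag, frees the context. */
	return (aes_encrypt_final(ctx, ct));
}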
static int
aes_encrypt_atomic(crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
    crypto_spi_ctx_template_t template)
{
	aes_ctx_t aes_ctx;
	off_t saved_offset;
	size_t saved_length;
	size_t length_needed;
	int ret;

	memset(&aes_ctx, 0, sizeof (aes_ctx_t));

	ASSERT(ciphertext != NULL);

-	/*
-	 * CTR, CCM, GCM, and GMAC modes do not require that plaintext
-	 * be a multiple of AES block size.
-	 */
-	switch (mechanism->cm_type) {
-	case AES_CTR_MECH_INFO_TYPE:
-	case AES_CCM_MECH_INFO_TYPE:
-	case AES_GCM_MECH_INFO_TYPE:
-	case AES_GMAC_MECH_INFO_TYPE:
-		break;
-	default:
-		if ((plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
-			return (CRYPTO_DATA_LEN_RANGE);
-	}
-
	if ((ret = aes_check_mech_param(mechanism, NULL)) != CRYPTO_SUCCESS)
		return (ret);

	ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
	    KM_SLEEP, B_TRUE);
	if (ret != CRYPTO_SUCCESS)
		return (ret);

	switch (mechanism->cm_type) {
	case AES_CCM_MECH_INFO_TYPE:
		length_needed = plaintext->cd_length + aes_ctx.ac_mac_len;
		break;
-	case AES_GMAC_MECH_INFO_TYPE:
-		if (plaintext->cd_length != 0)
-			return (CRYPTO_ARGUMENTS_BAD);
-		zfs_fallthrough;
	case AES_GCM_MECH_INFO_TYPE:
		length_needed = plaintext->cd_length + aes_ctx.ac_tag_len;
		break;
	default:
-		length_needed = plaintext->cd_length;
+		__builtin_unreachable();
	}

	/* return size of buffer needed to store output */
	if (ciphertext->cd_length < length_needed) {
		ciphertext->cd_length = length_needed;
		ret = CRYPTO_BUFFER_TOO_SMALL;
		goto out;
	}

	saved_offset = ciphertext->cd_offset;
	saved_length = ciphertext->cd_length;

	/*
	 * Do an update on the specified input data.
	 */
	switch (plaintext->cd_format) {
	case CRYPTO_DATA_RAW:
		ret = crypto_update_iov(&aes_ctx, plaintext, ciphertext,
		    aes_encrypt_contiguous_blocks);
		break;
	case CRYPTO_DATA_UIO:
		ret = crypto_update_uio(&aes_ctx, plaintext, ciphertext,
		    aes_encrypt_contiguous_blocks);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
			ret = ccm_encrypt_final((ccm_ctx_t *)&aes_ctx,
			    ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
			    aes_xor_block);
			if (ret != CRYPTO_SUCCESS)
				goto out;
			ASSERT(aes_ctx.ac_remainder_len == 0);
-		} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
-		    mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) {
+		} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) {
			ret = gcm_encrypt_final((gcm_ctx_t *)&aes_ctx,
			    ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
			    aes_copy_block, aes_xor_block);
			if (ret != CRYPTO_SUCCESS)
				goto out;
			ASSERT(aes_ctx.ac_remainder_len == 0);
-		} else if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
-			if (aes_ctx.ac_remainder_len > 0) {
-				ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx,
-				    ciphertext, aes_encrypt_block);
-				if (ret != CRYPTO_SUCCESS)
-					goto out;
-			}
		} else {
			ASSERT(aes_ctx.ac_remainder_len == 0);
		}

		if (plaintext != ciphertext) {
			ciphertext->cd_length =
			    ciphertext->cd_offset - saved_offset;
		}
	} else {
		ciphertext->cd_length = saved_length;
	}

	ciphertext->cd_offset = saved_offset;

out:
	if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
		memset(aes_ctx.ac_keysched, 0, aes_ctx.ac_keysched_len);
		kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
	}
-	if (aes_ctx.ac_flags & (GCM_MODE|GMAC_MODE)) {
+	if (aes_ctx.ac_flags & GCM_MODE) {
		gcm_clear_ctx((gcm_ctx_t *)&aes_ctx);
	}

	return (ret);
}

static int
aes_decrypt_atomic(crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
    crypto_spi_ctx_template_t template)
{
	aes_ctx_t aes_ctx;
	off_t saved_offset;
	size_t saved_length;
	size_t length_needed;
	int ret;

	memset(&aes_ctx, 0, sizeof (aes_ctx_t));

	ASSERT(plaintext != NULL);

-	/*
-	 * CCM, GCM, CTR, and GMAC modes do not require that ciphertext
-	 * be a multiple of AES block size.
-	 */
-	switch (mechanism->cm_type) {
-	case AES_CTR_MECH_INFO_TYPE:
-	case AES_CCM_MECH_INFO_TYPE:
-	case AES_GCM_MECH_INFO_TYPE:
-	case AES_GMAC_MECH_INFO_TYPE:
-		break;
-	default:
-		if ((ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
-			return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
-	}
-
	if ((ret = aes_check_mech_param(mechanism, NULL)) != CRYPTO_SUCCESS)
		return (ret);

	ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
	    KM_SLEEP, B_FALSE);
	if (ret != CRYPTO_SUCCESS)
		return (ret);

	switch (mechanism->cm_type) {
	case AES_CCM_MECH_INFO_TYPE:
		length_needed = aes_ctx.ac_data_len;
		break;
	case AES_GCM_MECH_INFO_TYPE:
		length_needed = ciphertext->cd_length - aes_ctx.ac_tag_len;
		break;
-	case AES_GMAC_MECH_INFO_TYPE:
-		if (plaintext->cd_length != 0)
-			return (CRYPTO_ARGUMENTS_BAD);
-		length_needed = 0;
-		break;
	default:
-		length_needed = ciphertext->cd_length;
+		__builtin_unreachable();
	}

	/* return size of buffer needed to store output */
	if (plaintext->cd_length < length_needed) {
		plaintext->cd_length = length_needed;
		ret = CRYPTO_BUFFER_TOO_SMALL;
		goto out;
	}

	saved_offset = plaintext->cd_offset;
	saved_length = plaintext->cd_length;

	/*
	 * Do an update on the specified input data.
	 */
	switch (ciphertext->cd_format) {
	case CRYPTO_DATA_RAW:
		ret = crypto_update_iov(&aes_ctx, ciphertext, plaintext,
		    aes_decrypt_contiguous_blocks);
		break;
	case CRYPTO_DATA_UIO:
		ret = crypto_update_uio(&aes_ctx, ciphertext, plaintext,
		    aes_decrypt_contiguous_blocks);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
			ASSERT(aes_ctx.ac_processed_data_len
			    == aes_ctx.ac_data_len);
			ASSERT(aes_ctx.ac_processed_mac_len
			    == aes_ctx.ac_mac_len);
			ret = ccm_decrypt_final((ccm_ctx_t *)&aes_ctx,
			    plaintext, AES_BLOCK_LEN, aes_encrypt_block,
			    aes_copy_block, aes_xor_block);
			ASSERT(aes_ctx.ac_remainder_len == 0);
			if ((ret == CRYPTO_SUCCESS) &&
			    (ciphertext != plaintext)) {
				plaintext->cd_length =
				    plaintext->cd_offset - saved_offset;
			} else {
				plaintext->cd_length = saved_length;
			}
-		} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
-		    mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) {
+		} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) {
			ret = gcm_decrypt_final((gcm_ctx_t *)&aes_ctx,
			    plaintext, AES_BLOCK_LEN, aes_encrypt_block,
			    aes_xor_block);
			ASSERT(aes_ctx.ac_remainder_len == 0);
			if ((ret == CRYPTO_SUCCESS) &&
			    (ciphertext != plaintext)) {
				plaintext->cd_length =
				    plaintext->cd_offset - saved_offset;
			} else {
				plaintext->cd_length = saved_length;
			}
-		} else if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
-			ASSERT(aes_ctx.ac_remainder_len == 0);
-			if (ciphertext != plaintext)
-				plaintext->cd_length =
-				    plaintext->cd_offset - saved_offset;
-		} else {
-			if (aes_ctx.ac_remainder_len > 0) {
-				ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx,
-				    plaintext, aes_encrypt_block);
-				if (ret == CRYPTO_DATA_LEN_RANGE)
-					ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
-				if (ret != CRYPTO_SUCCESS)
-					goto out;
-			}
-			if (ciphertext != plaintext)
-				plaintext->cd_length =
-				    plaintext->cd_offset - saved_offset;
-		}
+		} else
+			__builtin_unreachable();
	} else {
		plaintext->cd_length = saved_length;
	}

	plaintext->cd_offset = saved_offset;

out:
	if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
		memset(aes_ctx.ac_keysched, 0, aes_ctx.ac_keysched_len);
		kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
	}

	if (aes_ctx.ac_flags & CCM_MODE) {
		if (aes_ctx.ac_pt_buf != NULL) {
			vmem_free(aes_ctx.ac_pt_buf, aes_ctx.ac_data_len);
		}
-	} else if (aes_ctx.ac_flags & (GCM_MODE|GMAC_MODE)) {
+	} else if (aes_ctx.ac_flags & GCM_MODE) {
		gcm_clear_ctx((gcm_ctx_t *)&aes_ctx);
	}

	return (ret);
}

/*
 * KCF software provider context template entry points.
 */
static int
aes_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key,
    crypto_spi_ctx_template_t *tmpl, size_t *tmpl_size)
{
	void *keysched;
	size_t size;
	int rv;

-	if (mechanism->cm_type != AES_ECB_MECH_INFO_TYPE &&
-	    mechanism->cm_type != AES_CBC_MECH_INFO_TYPE &&
-	    mechanism->cm_type != AES_CTR_MECH_INFO_TYPE &&
-	    mechanism->cm_type != AES_CCM_MECH_INFO_TYPE &&
-	    mechanism->cm_type != AES_GCM_MECH_INFO_TYPE &&
-	    mechanism->cm_type != AES_GMAC_MECH_INFO_TYPE)
+	if (mechanism->cm_type != AES_CCM_MECH_INFO_TYPE &&
+	    mechanism->cm_type != AES_GCM_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	if ((keysched = aes_alloc_keysched(&size, KM_SLEEP)) == NULL) {
		return (CRYPTO_HOST_MEMORY);
	}

	/*
	 * Initialize key schedule.  Key length information is stored
	 * in the key.
	 */
	if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
		memset(keysched, 0, size);
		kmem_free(keysched, size);
		return (rv);
	}

	*tmpl = keysched;
	*tmpl_size = size;

	return (CRYPTO_SUCCESS);
}

static int
aes_free_context(crypto_ctx_t *ctx)
{
	aes_ctx_t *aes_ctx = ctx->cc_provider_private;

	if (aes_ctx != NULL) {
		if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
			ASSERT(aes_ctx->ac_keysched_len != 0);
			memset(aes_ctx->ac_keysched, 0,
			    aes_ctx->ac_keysched_len);
			kmem_free(aes_ctx->ac_keysched,
			    aes_ctx->ac_keysched_len);
		}
		crypto_free_mode_ctx(aes_ctx);
		ctx->cc_provider_private = NULL;
	}

	return (CRYPTO_SUCCESS);
}
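aes_create_ctx_template() exists so the expensive step, expanding the key schedule, can be done once and replayed: aes_common_init_ctx() below simply adopts a non-NULL template as its key schedule. A sketch of the intended reuse; the wrapper is hypothetical, and the zero-then-free cleanup mirrors the kmem_free() convention used elsewhere in this file:

/*
 * Hypothetical caller that amortizes key-schedule setup across many
 * atomic operations with the same key.
 */
static void
template_reuse_example(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_data_t *pt, crypto_data_t *ct)
{
	crypto_spi_ctx_template_t tmpl;
	size_t tmpl_size;

	/* Expand the key schedule once. */
	if (aes_create_ctx_template(mech, key, &tmpl, &tmpl_size) !=
	    CRYPTO_SUCCESS)
		return;

	/* Every call with a non-NULL template skips init_keysched(). */
	(void) aes_encrypt_atomic(mech, key, pt, ct, tmpl);

	/* Scrub the key material before releasing it. */
	memset(tmpl, 0, tmpl_size);
	kmem_free(tmpl, tmpl_size);
}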
static int
aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
    crypto_mechanism_t *mechanism, crypto_key_t *key, int kmflag,
    boolean_t is_encrypt_init)
{
	int rv = CRYPTO_SUCCESS;
	void *keysched;
	size_t size = 0;

	if (template == NULL) {
		if ((keysched = aes_alloc_keysched(&size, kmflag)) == NULL)
			return (CRYPTO_HOST_MEMORY);
		/*
		 * Initialize key schedule.
		 * Key length is stored in the key.
		 */
		if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
			kmem_free(keysched, size);
			return (rv);
		}

		aes_ctx->ac_flags |= PROVIDER_OWNS_KEY_SCHEDULE;
		aes_ctx->ac_keysched_len = size;
	} else {
		keysched = template;
	}
	aes_ctx->ac_keysched = keysched;

	switch (mechanism->cm_type) {
-	case AES_CBC_MECH_INFO_TYPE:
-		rv = cbc_init_ctx((cbc_ctx_t *)aes_ctx, mechanism->cm_param,
-		    mechanism->cm_param_len, AES_BLOCK_LEN, aes_copy_block64);
-		break;
-	case AES_CTR_MECH_INFO_TYPE: {
-		CK_AES_CTR_PARAMS *pp;
-
-		if (mechanism->cm_param == NULL ||
-		    mechanism->cm_param_len != sizeof (CK_AES_CTR_PARAMS)) {
-			return (CRYPTO_MECHANISM_PARAM_INVALID);
-		}
-		pp = (CK_AES_CTR_PARAMS *)(void *)mechanism->cm_param;
-		rv = ctr_init_ctx((ctr_ctx_t *)aes_ctx, pp->ulCounterBits,
-		    pp->cb, aes_copy_block);
-		break;
-	}
	case AES_CCM_MECH_INFO_TYPE:
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (CK_AES_CCM_PARAMS)) {
			return (CRYPTO_MECHANISM_PARAM_INVALID);
		}
		rv = ccm_init_ctx((ccm_ctx_t *)aes_ctx, mechanism->cm_param,
		    kmflag, is_encrypt_init, AES_BLOCK_LEN, aes_encrypt_block,
		    aes_xor_block);
		break;
	case AES_GCM_MECH_INFO_TYPE:
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (CK_AES_GCM_PARAMS)) {
			return (CRYPTO_MECHANISM_PARAM_INVALID);
		}
		rv = gcm_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		break;
-	case AES_GMAC_MECH_INFO_TYPE:
-		if (mechanism->cm_param == NULL ||
-		    mechanism->cm_param_len != sizeof (CK_AES_GMAC_PARAMS)) {
-			return (CRYPTO_MECHANISM_PARAM_INVALID);
-		}
-		rv = gmac_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param,
-		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
-		    aes_xor_block);
-		break;
-	case AES_ECB_MECH_INFO_TYPE:
-		aes_ctx->ac_flags |= ECB_MODE;
	}

	if (rv != CRYPTO_SUCCESS) {
		if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
			memset(keysched, 0, size);
			kmem_free(keysched, size);
		}
	}

	return (rv);
}
-
-static int
-process_gmac_mech(crypto_mechanism_t *mech, crypto_data_t *data,
-    CK_AES_GCM_PARAMS *gcm_params)
-{
-	/* LINTED: pointer alignment */
-	CK_AES_GMAC_PARAMS *params = (CK_AES_GMAC_PARAMS *)mech->cm_param;
-
-	if (mech->cm_type != AES_GMAC_MECH_INFO_TYPE)
-		return (CRYPTO_MECHANISM_INVALID);
-
-	if (mech->cm_param_len != sizeof (CK_AES_GMAC_PARAMS))
-		return (CRYPTO_MECHANISM_PARAM_INVALID);
-
-	if (params->pIv == NULL)
-		return (CRYPTO_MECHANISM_PARAM_INVALID);
-
-	gcm_params->pIv = params->pIv;
-	gcm_params->ulIvLen = AES_GMAC_IV_LEN;
-	gcm_params->ulTagBits = AES_GMAC_TAG_BITS;
-
-	if (data == NULL)
-		return (CRYPTO_SUCCESS);
-
-	if (data->cd_format != CRYPTO_DATA_RAW)
-		return (CRYPTO_ARGUMENTS_BAD);
-
-	gcm_params->pAAD = (uchar_t *)data->cd_raw.iov_base;
-	gcm_params->ulAADLen = data->cd_length;
-	return (CRYPTO_SUCCESS);
-}
-
-static int
-aes_mac_atomic(crypto_mechanism_t *mechanism,
-    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
-    crypto_spi_ctx_template_t template)
-{
-	CK_AES_GCM_PARAMS gcm_params;
-	crypto_mechanism_t gcm_mech;
-	int rv;
-
-	if ((rv = process_gmac_mech(mechanism, data, &gcm_params))
-	    != CRYPTO_SUCCESS)
-		return (rv);
-
-	gcm_mech.cm_type = AES_GCM_MECH_INFO_TYPE;
-	gcm_mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
-	gcm_mech.cm_param = (char *)&gcm_params;
-
-	return (aes_encrypt_atomic(&gcm_mech,
-	    key, &null_crypto_data, mac, template));
-}
-
-static int
-aes_mac_verify_atomic(crypto_mechanism_t *mechanism, crypto_key_t *key,
-    crypto_data_t *data, crypto_data_t *mac, crypto_spi_ctx_template_t template)
-{
-	CK_AES_GCM_PARAMS gcm_params;
-	crypto_mechanism_t gcm_mech;
-	int rv;
-
-	if ((rv = process_gmac_mech(mechanism, data, &gcm_params))
-	    != CRYPTO_SUCCESS)
-		return (rv);
-
-	gcm_mech.cm_type = AES_GCM_MECH_INFO_TYPE;
-	gcm_mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
-	gcm_mech.cm_param = (char *)&gcm_params;
-
-	return (aes_decrypt_atomic(&gcm_mech,
-	    key, mac, &null_crypto_data, template));
-}
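The deleted wrappers show why GMAC could go without losing capability: GMAC is exactly GCM with a zero-length payload and the message carried as AAD. A caller can still compute the same MAC through the surviving GCM path. In the sketch below the helper name is hypothetical, and the 12-byte IV and 128-bit tag are hard-coded to match what the removed AES_GMAC_IV_LEN and AES_GMAC_TAG_BITS constants supplied:

/*
 * Hypothetical GMAC-over-GCM shim, equivalent to the removed
 * aes_mac_atomic() path.
 */
static int
gmac_via_gcm(crypto_key_t *key, uchar_t *iv, crypto_data_t *msg,
    crypto_data_t *mac, crypto_spi_ctx_template_t tmpl)
{
	CK_AES_GCM_PARAMS gcm_params = {
		.pIv = iv,
		.ulIvLen = 12,		/* was AES_GMAC_IV_LEN */
		/* msg must be CRYPTO_DATA_RAW, as process_gmac_mech() required */
		.pAAD = (uchar_t *)msg->cd_raw.iov_base,
		.ulAADLen = msg->cd_length,
		.ulTagBits = 128,	/* was AES_GMAC_TAG_BITS */
	};
	crypto_mechanism_t gcm_mech = {
		.cm_type = AES_GCM_MECH_INFO_TYPE,
		.cm_param = (caddr_t)&gcm_params,
		.cm_param_len = sizeof (CK_AES_GCM_PARAMS),
	};
	crypto_data_t empty = { .cd_format = CRYPTO_DATA_RAW };

	/* Zero-length plaintext: the only output is the authentication tag. */
	return (aes_encrypt_atomic(&gcm_mech, key, &empty, mac, tmpl));
}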