D27454: Add AES-GCM H/W acceleration for kTLS on ARMv8 architecture
File: D27454.id80253.diff (20 KB)
Index: sys/crypto/armv8/ktls_armv8.c
===================================================================
--- /dev/null
+++ sys/crypto/armv8/ktls_armv8.c
@@ -0,0 +1,589 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Ampere Computing
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/ktls.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/sockbuf.h>
+#include <sys/filedesc.h>
+#include <sys/sysctl.h>
+#include <sys/counter.h>
+#include <sys/uio.h>
+#include <sys/module.h>
+#include <opencrypto/xform.h>
+#include <sys/smp.h>
+
+#define KTLS_AEAD_TAGLEN 16
+
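+/*
+ * Expanded AES key schedule in the layout consumed by the aes_v8_*
+ * assembly routines from OpenSSL's aesv8-armx.S.
+ */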
+typedef struct {
+ uint32_t aes_ek[4*(RIJNDAEL_MAXNR + 1)];
+ int aes_nr;
+} AES_key_t;
+
+typedef union {
+ uint64_t u[2];
+ uint32_t d[4];
+ uint8_t c[16];
+ size_t t[16 / sizeof(size_t)];
+} __uint128_val_t;
+
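+/*
+ * GHASH state: EK0 holds the encrypted initial counter block (folded
+ * into the final tag), Xi the running hash value, H the hash subkey,
+ * len the AAD/payload bit-length block, and Htable precomputed
+ * multiples of H for the gcm_*_v8 routines.
+ */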
+typedef struct {
+ __uint128_val_t EK0, Xi, H, len;
+ __uint128_val_t Htable[16];
+} GMAC_ctx_t;
+
+/* Prototypes for aesv8-armx.S */
+void aes_v8_encrypt(uint8_t *in, uint8_t *out, AES_key_t *key);
+int aes_v8_set_encrypt_key(const unsigned char *userKey, const int bits, AES_key_t *key);
+
+/* Prototypes for ghashv8-armx.S */
+void gcm_init_v8(__uint128_val_t Htable[16], const uint64_t Xi[2]);
+void gcm_gmult_v8(uint64_t Xi[2], const __uint128_val_t Htable[16]);
+void gcm_ghash_v8(uint64_t Xi[2], const __uint128_val_t Htable[16], const uint8_t *inp, size_t len);
+
+struct armv8_gcm_struct {
+ AES_key_t aes_key;
+ __uint128_val_t EKi;
+ GMAC_ctx_t gmac_ctx;
+ uint8_t aes_counter[AES_BLOCK_LEN];
+};
+
+static MALLOC_DEFINE(M_ARMV8_GCM, "armv8_tls", "ARMv8 TLS");
+
+static struct mtx *ctx_mtx;
+static struct fpu_kern_ctx **ctx_vfp;
+
+#define ACQUIRE_CTX(i, ctx) \
+ do { \
+ (i) = PCPU_GET(cpuid); \
+ /*mtx_lock(&ctx_mtx[(i)]);*/ \
+ (ctx) = ctx_vfp[(i)]; \
+ } while (0)
+#define RELEASE_CTX(i, ctx) \
+ do { \
+ /*mtx_unlock(&ctx_mtx[(i)]);*/ \
+ (i) = -1; \
+ (ctx) = NULL; \
+ } while (0)
+
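+/* Increment the big-endian CTR-mode counter, propagating carries. */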
+#define AES_INC_COUNTER(sc) \
+ do { \
+ for (int i = AES_BLOCK_LEN - 1; \
+ i >= 0; i--) \
+ if (++(sc)->aes_counter[i]) \
+ break; \
+ } while (0)
+
+SYSCTL_DECL(_kern_ipc_tls);
+
+static int ktls_use_armv8_gcm = 1;
+SYSCTL_INT(_kern_ipc_tls, OID_AUTO, armv8_gcm, CTLFLAG_RW,
+ &ktls_use_armv8_gcm, 1,
+    "Use ARMv8 AES-GCM acceleration if available");
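+/*
+ * Example usage (kern.ipc.tls is declared above via SYSCTL_DECL):
+ *   sysctl kern.ipc.tls.armv8_gcm=0	# disable this backend
+ */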
+
+SYSCTL_DECL(_kern_ipc_tls_stats);
+
+static counter_u64_t ktls_offload_armv8_aead;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, armv8_aead_crypts,
+ CTLFLAG_RD, &ktls_offload_armv8_aead,
+ "Total number of ARMv8 TLS AEAD encrypts called");
+
+static counter_u64_t ktls_offload_armv8_tls12;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, armv8_tls12_crypts,
+ CTLFLAG_RD, &ktls_offload_armv8_tls12,
+ "Total number of ARMv8 TLSv1.2 encrypts called");
+
+static counter_u64_t ktls_offload_armv8_tls13;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, armv8_tls13_crypts,
+ CTLFLAG_RD, &ktls_offload_armv8_tls13,
+ "Total number of ARMv8 TLSv1.3 encrypts called");
+
+static counter_u64_t ktls_offload_armv8_unaligned_mem_b;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, armv8_unaligned_bytes,
+ CTLFLAG_RD, &ktls_offload_armv8_unaligned_mem_b,
+    "Byte count of unaligned ARMv8 TLS data");
+
+static counter_u64_t ktls_offload_armv8_aligned_mem_b;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, armv8_aligned_bytes,
+ CTLFLAG_RD, &ktls_offload_armv8_aligned_mem_b,
+    "Byte count of aligned ARMv8 TLS data");
+
+static counter_u64_t ktls_offload_armv8_glue_mem_b;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, armv8_glue_bytes,
+ CTLFLAG_RD, &ktls_offload_armv8_glue_mem_b,
+    "Byte count of ARMv8 TLS \"glue\" between iovecs");
+
+static counter_u64_t ktls_offload_armv8_unaligned_mem;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, armv8_unaligned,
+ CTLFLAG_RD, &ktls_offload_armv8_unaligned_mem,
+    "Call count of unaligned ARMv8 TLS data");
+
+static counter_u64_t ktls_offload_armv8_aligned_mem;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, armv8_aligned,
+ CTLFLAG_RD, &ktls_offload_armv8_aligned_mem,
+    "Call count of aligned ARMv8 TLS data");
+
+static counter_u64_t ktls_offload_armv8_glue_mem;
+SYSCTL_COUNTER_U64(_kern_ipc_tls_stats, OID_AUTO, armv8_glue,
+ CTLFLAG_RD, &ktls_offload_armv8_glue_mem,
+    "Call count of ARMv8 TLS \"glue\" between iovecs");
+
+static int
+ktls_armv8ktls_aead_encrypt(struct ktls_session *tls,
+ const struct tls_record_layer *hdr, uint8_t *tag, struct iovec *iniov,
+ struct iovec *outiov, int iovcnt, uint64_t seqno, uint8_t tls_rtype)
+{
+ struct armv8_gcm_struct *sc;
+ struct fpu_kern_ctx *ctx;
+ int kt, i, iov;
+ uint8_t *in, *out;
+ uint8_t *previn, *prevout;
+ __uint128_val_t *in128, *out128;
+ uint64_t len, trailer_len, header_len;
+ struct tls_aead_data ad;
+ struct tls_aead_data_13 ad13;
+ struct tls_nonce_data nd;
+ char nonce[12];
+ size_t adlen, payload_length;
+ uint8_t *adptr;
+ uint16_t tls_comp_len;
+ uint8_t block[AES_BLOCK_LEN];
+ bool is_tls13;
+
+ sc = (struct armv8_gcm_struct *)tls->cipher;
+
+ kt = is_fpu_kern_thread(0);
+ if (!kt) {
+		ACQUIRE_CTX(i, ctx);
+ fpu_kern_enter(curthread, ctx,
+ FPU_KERN_NORMAL | FPU_KERN_KTHR);
+ }
+
+ KASSERT(sc != NULL, ("Null cipher"));
+ counter_u64_add(ktls_offload_armv8_aead, 1);
+
+ is_tls13 = tls->params.tls_vminor == TLS_MINOR_VER_THREE;
+ if (is_tls13)
+ counter_u64_add(ktls_offload_armv8_tls13, 1);
+ else
+ counter_u64_add(ktls_offload_armv8_tls12, 1);
+
+ /* Setup the associated data */
+ if (is_tls13) {
+ ad13.type = hdr->tls_type;
+ ad13.tls_vmajor = hdr->tls_vmajor;
+ ad13.tls_vminor = hdr->tls_vminor;
+ ad13.tls_length = hdr->tls_length;
+ adlen = sizeof(ad13);
+ adptr = (uint8_t *)&ad13;
+ } else {
+ ad.seq = htobe64(seqno);
+ ad.type = hdr->tls_type;
+ ad.tls_vmajor = hdr->tls_vmajor;
+ ad.tls_vminor = hdr->tls_vminor;
+ tls_comp_len = ntohs(hdr->tls_length) -
+ (KTLS_AEAD_TAGLEN + sizeof(nd.seq));
+ ad.tls_length = htons(tls_comp_len);
+
+ adlen = sizeof(ad);
+ adptr = (uint8_t *)&ad;
+ }
+
+ bzero(&sc->aes_counter, AES_BLOCK_LEN);
+ /* Setup the nonce */
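+	/*
+	 * TLS 1.3 XORs the sequence number into the 12-byte IV; TLS 1.2
+	 * AES-GCM concatenates the 4-byte implicit IV with the 8-byte
+	 * explicit nonce taken from the record header.
+	 */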
+ if (is_tls13) {
+ memcpy(nonce, tls->params.iv, tls->params.iv_len);
+ *(uint64_t *)(nonce + 4) ^= htobe64(seqno);
+ memcpy(sc->aes_counter, nonce, sizeof(nonce));
+ } else {
+ memcpy(&nd, tls->params.iv, tls->params.iv_len);
+ memcpy(&nd.seq, hdr + 1, sizeof(nd.seq));
+ memcpy(sc->aes_counter, &nd, sizeof(nd));
+ }
+ /* Setup the counter */
+ sc->aes_counter[AES_BLOCK_LEN - 1] = 1;
+
+ /* EK0 for a final GMAC round */
+ aes_v8_encrypt(sc->aes_counter, sc->gmac_ctx.EK0.c, &sc->aes_key);
+
+ /* GCM starts with 2 as counter, 1 is used for final xor of tag. */
+ sc->aes_counter[AES_BLOCK_LEN - 1] = 2;
+
+ memset(sc->gmac_ctx.Xi.c, 0, sizeof(sc->gmac_ctx.Xi.c));
+
+ /*
+ * Feed AEAD data to the GMAC
+ */
+ KASSERT(adlen <= AES_BLOCK_LEN, ("AEAD data is larger than block"));
+ memset(block, 0, sizeof(block));
+ memcpy(block, adptr, adlen);
+ gcm_ghash_v8(sc->gmac_ctx.Xi.u, sc->gmac_ctx.Htable, block, AES_BLOCK_LEN);
+
+ payload_length = 0;
+ trailer_len = 0;
+
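+	/*
+	 * Walk the iovecs, CTR-encrypting and GHASHing full 16-byte
+	 * blocks in place; a partial block at the end of one iovec is
+	 * stitched to the head of the next ("glue") before hashing.
+	 */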
+ for (iov = 0; iov < iovcnt; iov++) {
+ in = iniov[iov].iov_base;
+ out = outiov[iov].iov_base;
+ len = iniov[iov].iov_len;
+
+ /* Previous iovec had a trailer */
+ if (trailer_len) {
+ aes_v8_encrypt(sc->aes_counter, sc->EKi.c, &sc->aes_key);
+ AES_INC_COUNTER(sc);
+
+ /* Last iov may not have enough data to fill the block */
+ header_len = min(AES_BLOCK_LEN - trailer_len, len);
+
+ /* Encode and prepare block for GMAC */
+ i = 0;
+ while (i < trailer_len) {
+ block[i] = prevout[i] = previn[i] ^ sc->EKi.c[i];
+ i++;
+ }
+
+ while (i < header_len + trailer_len) {
+ block[i] = out[i - trailer_len] = in[i - trailer_len] ^ sc->EKi.c[i];
+ i++;
+ }
+
+ while (i < AES_BLOCK_LEN) {
+ block[i] = 0;
+ i++;
+ }
+
+ gcm_ghash_v8(sc->gmac_ctx.Xi.u, sc->gmac_ctx.Htable, block, sizeof(block));
+
+ payload_length += trailer_len + header_len;
+ counter_u64_add(ktls_offload_armv8_glue_mem, 1);
+ counter_u64_add(ktls_offload_armv8_glue_mem_b, trailer_len + header_len);
+
+ in += header_len;
+ out += header_len;
+ len -= header_len;
+ trailer_len = 0;
+ }
+
+ if (len == 0)
+ continue;
+
+ trailer_len = len & (AES_BLOCK_LEN - 1);
+ len -= trailer_len;
+ payload_length += len;
+
+ in128 = (__uint128_val_t *)in;
+ out128 = (__uint128_val_t *)out;
+ for (i = 0; i < len; i += AES_BLOCK_LEN, in128++, out128++) {
+ aes_v8_encrypt(sc->aes_counter, sc->EKi.c, &sc->aes_key);
+ AES_INC_COUNTER(sc);
+ out128->u[0] = in128->u[0] ^ sc->EKi.u[0];
+ out128->u[1] = in128->u[1] ^ sc->EKi.u[1];
+ gcm_ghash_v8(sc->gmac_ctx.Xi.u, sc->gmac_ctx.Htable, out128->c, AES_BLOCK_LEN);
+ }
+
+ if ((uintptr_t)in & 0xf) {
+ counter_u64_add(ktls_offload_armv8_unaligned_mem, 1);
+ counter_u64_add(ktls_offload_armv8_unaligned_mem_b, len);
+ } else {
+ counter_u64_add(ktls_offload_armv8_aligned_mem, 1);
+ counter_u64_add(ktls_offload_armv8_aligned_mem_b, len);
+ }
+
+ in += len;
+ out += len;
+
+ previn = in;
+ prevout = out;
+ }
+
+ /* Encode the trailer of the whole data */
+ if (trailer_len || is_tls13) {
+ aes_v8_encrypt(sc->aes_counter, sc->EKi.c, &sc->aes_key);
+ AES_INC_COUNTER(sc);
+
+ i = 0;
+ while (i < trailer_len) {
+ block[i] = out[i] = in[i] ^ sc->EKi.c[i];
+ i++;
+ }
+
+		/*
+		 * The record type byte physically lives in the tag area,
+		 * but logically it counts as part of the payload data.
+		 */
+ if (is_tls13) {
+ block[i] = tag[0] = tls_rtype ^ sc->EKi.c[i];
+ payload_length += 1;
+ i++;
+ }
+
+ while (i < AES_BLOCK_LEN) {
+ block[i] = 0;
+ i++;
+ }
+
+ gcm_ghash_v8(sc->gmac_ctx.Xi.u, sc->gmac_ctx.Htable, block, sizeof(block));
+ payload_length += trailer_len;
+ }
+
+	/* Lengths block: bit lengths of AAD and payload, per GCM. */
+ sc->gmac_ctx.len.d[1] = htobe32(adlen * 8);
+ sc->gmac_ctx.len.d[3] = htobe32(payload_length * 8);
+ gcm_ghash_v8(sc->gmac_ctx.Xi.u, sc->gmac_ctx.Htable, sc->gmac_ctx.len.c, AES_BLOCK_LEN);
+ sc->gmac_ctx.Xi.u[0] ^= sc->gmac_ctx.EK0.u[0];
+ sc->gmac_ctx.Xi.u[1] ^= sc->gmac_ctx.EK0.u[1];
+ if (is_tls13)
+ memcpy(tag + 1, sc->gmac_ctx.Xi.c, AES_BLOCK_LEN);
+ else
+ memcpy(tag, sc->gmac_ctx.Xi.c, AES_BLOCK_LEN);
+
+ if (!kt) {
+ fpu_kern_leave(curthread, ctx);
+ RELEASE_CTX(i, ctx);
+ }
+
+	return (0);
+}
+
+static int
+ktls_armv8ktls_setup_cipher(struct armv8_gcm_struct *sc, uint8_t *key, int keylen)
+{
+ struct fpu_kern_ctx *fpu_ctx;
+
+ if (key == NULL)
+ return (EINVAL);
+
+ fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NOWAIT);
+ if (fpu_ctx == NULL)
+ return (ENOMEM);
+ fpu_kern_enter(curthread, fpu_ctx, FPU_KERN_NORMAL);
+
+ aes_v8_set_encrypt_key(key, keylen * 8, &sc->aes_key);
+
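+	/* GHASH subkey H = AES_K(0^128), byte-swapped for gcm_init_v8() */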
+ memset(sc->gmac_ctx.H.c, 0, sizeof(sc->gmac_ctx.H.c));
+ aes_v8_encrypt(sc->gmac_ctx.H.c, sc->gmac_ctx.H.c, &sc->aes_key);
+ sc->gmac_ctx.H.u[0] = bswap64(sc->gmac_ctx.H.u[0]);
+ sc->gmac_ctx.H.u[1] = bswap64(sc->gmac_ctx.H.u[1]);
+ gcm_init_v8(sc->gmac_ctx.Htable, sc->gmac_ctx.H.u);
+
+ fpu_kern_leave(curthread, fpu_ctx);
+ fpu_kern_free_ctx(fpu_ctx);
+
+ return (0);
+}
+
+static void
+ktls_armv8ktls_free(struct ktls_session *tls)
+{
+ struct armv8_gcm_struct *sc;
+
+ sc = tls->cipher;
+ explicit_bzero(sc, sizeof(*sc));
+ free(sc, M_ARMV8_GCM);
+}
+
+static int
+ktls_armv8ktls_try(struct socket *so, struct ktls_session *tls, int direction)
+{
+ struct armv8_gcm_struct *sc;
+ int error;
+
+ if (direction != KTLS_TX)
+ return (EOPNOTSUPP);
+
+ if (!ktls_use_armv8_gcm)
+ return (EOPNOTSUPP);
+
+ if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
+ switch (tls->params.cipher_key_len) {
+ case 128 / 8:
+ case 256 / 8:
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+
+ /* Only TLS 1.2 and 1.3 are supported. */
+ if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
+ tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
+ tls->params.tls_vminor > TLS_MINOR_VER_THREE)
+ return (EPROTONOSUPPORT);
+
+		/* TLS 1.3 RX is unsupported (unreachable while only TX is accepted above). */
+ if (direction == KTLS_RX &&
+ tls->params.tls_vminor == TLS_MINOR_VER_THREE)
+ return (EPROTONOSUPPORT);
+
+		sc = malloc(sizeof(*sc), M_ARMV8_GCM, M_NOWAIT | M_ZERO);
+ if (sc == NULL)
+ return (ENOMEM);
+
+ error = ktls_armv8ktls_setup_cipher(sc,
+ tls->params.cipher_key,
+ tls->params.cipher_key_len);
+ if (error) {
+ free(sc, M_ARMV8_GCM);
+ return (error);
+ }
+
+ tls->cipher = sc;
+ tls->sw_encrypt = ktls_armv8ktls_aead_encrypt;
+ tls->free = ktls_armv8ktls_free;
+ return (0);
+ }
+
+ return (EOPNOTSUPP);
+}
+
+struct ktls_crypto_backend armv8ktls_backend = {
+ .name = "ARMv8 AES GCM",
+ .prio = 20,
+ .api_version = KTLS_API_VERSION,
+ .try = ktls_armv8ktls_try,
+};
+
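+/*
+ * Allocate a per-CPU fpu_kern context (plus a mutex that the
+ * ACQUIRE_CTX/RELEASE_CTX macros currently leave unused) so the
+ * encrypt path can enter FP/SIMD context on any CPU.
+ */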
+static int
+armv8ktls_init(void)
+{
+ int i;
+
+ ctx_mtx = malloc(sizeof(*ctx_mtx) * (mp_maxid + 1), M_ARMV8_GCM,
+ M_WAITOK|M_ZERO);
+ ctx_vfp = malloc(sizeof(*ctx_vfp) * (mp_maxid + 1), M_ARMV8_GCM,
+ M_WAITOK|M_ZERO);
+
+ CPU_FOREACH(i) {
+ ctx_vfp[i] = fpu_kern_alloc_ctx(0);
+ mtx_init(&ctx_mtx[i], "armv8ktlsctx", NULL, MTX_DEF|MTX_NEW);
+ }
+
+ ktls_offload_armv8_aead = counter_u64_alloc(M_WAITOK);
+ ktls_offload_armv8_aligned_mem = counter_u64_alloc(M_WAITOK);
+ ktls_offload_armv8_aligned_mem_b = counter_u64_alloc(M_WAITOK);
+ ktls_offload_armv8_unaligned_mem = counter_u64_alloc(M_WAITOK);
+ ktls_offload_armv8_unaligned_mem_b = counter_u64_alloc(M_WAITOK);
+ ktls_offload_armv8_glue_mem = counter_u64_alloc(M_WAITOK);
+ ktls_offload_armv8_glue_mem_b = counter_u64_alloc(M_WAITOK);
+ ktls_offload_armv8_tls12 = counter_u64_alloc(M_WAITOK);
+ ktls_offload_armv8_tls13 = counter_u64_alloc(M_WAITOK);
+
+ return (ktls_crypto_backend_register(&armv8ktls_backend));
+}
+
+static int
+armv8ktls_unload(void)
+{
+ int i, error;
+
+ error = ktls_crypto_backend_deregister(&armv8ktls_backend);
+ if (error != 0)
+ return (error);
+
+ CPU_FOREACH(i) {
+ if (ctx_vfp[i] != NULL) {
+ mtx_destroy(&ctx_mtx[i]);
+ fpu_kern_free_ctx(ctx_vfp[i]);
+ }
+ ctx_vfp[i] = NULL;
+ }
+ free(ctx_mtx, M_ARMV8_GCM);
+ ctx_mtx = NULL;
+ free(ctx_vfp, M_ARMV8_GCM);
+ ctx_vfp = NULL;
+
+ counter_u64_free(ktls_offload_armv8_aead);
+ counter_u64_free(ktls_offload_armv8_aligned_mem);
+ counter_u64_free(ktls_offload_armv8_aligned_mem_b);
+ counter_u64_free(ktls_offload_armv8_unaligned_mem);
+ counter_u64_free(ktls_offload_armv8_unaligned_mem_b);
+ counter_u64_free(ktls_offload_armv8_glue_mem);
+ counter_u64_free(ktls_offload_armv8_glue_mem_b);
+ counter_u64_free(ktls_offload_armv8_tls12);
+ counter_u64_free(ktls_offload_armv8_tls13);
+
+ return (0);
+}
+
+static int
+armv8ktls_module_event_handler(module_t mod, int evt, void *arg)
+{
+ switch (evt) {
+ case MOD_LOAD:
+ return (armv8ktls_init());
+ case MOD_UNLOAD:
+ return (armv8ktls_unload());
+ default:
+ return (EOPNOTSUPP);
+ }
+}
+
+static moduledata_t armv8ktls_moduledata = {
+ "armv8ktls",
+ armv8ktls_module_event_handler,
+ NULL
+};
+
+DECLARE_MODULE(armv8ktls, armv8ktls_moduledata, SI_SUB_PROTO_END, SI_ORDER_ANY);
Index: sys/modules/Makefile
===================================================================
--- sys/modules/Makefile
+++ sys/modules/Makefile
@@ -205,6 +205,7 @@
khelp \
krpc \
ksyms \
+ ${_ktls_armv8} \
${_ktls_ocf} \
le \
lge \
@@ -601,6 +602,7 @@
.if ${MACHINE_CPUARCH} == "aarch64"
_allwinner= allwinner
_armv8crypto= armv8crypto
+_ktls_armv8= ktls_armv8
_em= em
_rockchip= rockchip
.endif
Index: sys/modules/ktls_armv8/Makefile
===================================================================
--- /dev/null
+++ sys/modules/ktls_armv8/Makefile
@@ -0,0 +1,31 @@
+# $FreeBSD$
+
+.PATH: ${SRCTOP}/sys/crypto/armv8
+.PATH: ${SRCTOP}/sys/crypto/openssl/aarch64
+
+KMOD= ktls_armv8
+SRCS= ktls_armv8.c
+SRCS+= device_if.h bus_if.h opt_bus.h cryptodev_if.h
+
+OBJS+= aesv8-armx.o ghashv8-armx.o
+
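+# Build the OpenSSL assembly with the ARMv8 Crypto Extensions enabled.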
+aesv8-armx.o: aesv8-armx.S
+ ${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} \
+ -I${SRCTOP}/sys/crypto/armv8 \
+ -I${SRCTOP}/sys/crypto/openssl/crypto \
+ -I${.CURDIR} \
+ ${WERROR} ${PROF} \
+ -march=armv8-a+crypto ${.IMPSRC}
+ ${CTFCONVERT_CMD}
+
+ghashv8-armx.o: ghashv8-armx.S
+ ${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} \
+ -I${SRCTOP}/sys/crypto/armv8 \
+ -I${SRCTOP}/sys/crypto/openssl/crypto \
+ -I${.CURDIR} \
+ ${WERROR} ${PROF} \
+ -march=armv8-a+crypto ${.IMPSRC}
+ ${CTFCONVERT_CMD}
+
+.include <bsd.kmod.mk>
Index: sys/modules/ktls_armv8/arm_arch.h
===================================================================
--- /dev/null
+++ sys/modules/ktls_armv8/arm_arch.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2011-2018 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#ifndef OSSL_CRYPTO_ARM_ARCH_H
+# define OSSL_CRYPTO_ARM_ARCH_H
+
+# if !defined(__ARM_ARCH__)
+# if defined(__CC_ARM)
+# define __ARM_ARCH__ __TARGET_ARCH_ARM
+# if defined(__BIG_ENDIAN)
+# define __ARMEB__
+# else
+# define __ARMEL__
+# endif
+# elif defined(__GNUC__)
+# if defined(__aarch64__)
+# define __ARM_ARCH__ 8
+# if __BYTE_ORDER__==__ORDER_BIG_ENDIAN__
+# define __ARMEB__
+# else
+# define __ARMEL__
+# endif
+ /*
+ * Why doesn't gcc define __ARM_ARCH__? Instead it defines
+ * bunch of below macros. See all_architectures[] table in
+ * gcc/config/arm/arm.c. On a side note it defines
+ * __ARMEL__/__ARMEB__ for little-/big-endian.
+ */
+# elif defined(__ARM_ARCH)
+# define __ARM_ARCH__ __ARM_ARCH
+# elif defined(__ARM_ARCH_8A__)
+# define __ARM_ARCH__ 8
+# elif defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7R__)|| defined(__ARM_ARCH_7M__) || \
+ defined(__ARM_ARCH_7EM__)
+# define __ARM_ARCH__ 7
+# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
+ defined(__ARM_ARCH_6K__)|| defined(__ARM_ARCH_6M__) || \
+ defined(__ARM_ARCH_6Z__)|| defined(__ARM_ARCH_6ZK__) || \
+ defined(__ARM_ARCH_6T2__)
+# define __ARM_ARCH__ 6
+# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5E__)|| defined(__ARM_ARCH_5TE__) || \
+ defined(__ARM_ARCH_5TEJ__)
+# define __ARM_ARCH__ 5
+# elif defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__)
+# define __ARM_ARCH__ 4
+# else
+# error "unsupported ARM architecture"
+# endif
+# endif
+# endif
+
+# if !defined(__ARM_MAX_ARCH__)
+# define __ARM_MAX_ARCH__ __ARM_ARCH__
+# endif
+
+# if __ARM_MAX_ARCH__<__ARM_ARCH__
+# error "__ARM_MAX_ARCH__ can't be less than __ARM_ARCH__"
+# elif __ARM_MAX_ARCH__!=__ARM_ARCH__
+# if __ARM_ARCH__<7 && __ARM_MAX_ARCH__>=7 && defined(__ARMEB__)
+# error "can't build universal big-endian binary"
+# endif
+# endif
+
+# ifndef __ASSEMBLER__
+extern unsigned int OPENSSL_armcap_P;
+# endif
+
+# define ARMV7_NEON (1<<0)
+# define ARMV7_TICK (1<<1)
+# define ARMV8_AES (1<<2)
+# define ARMV8_SHA1 (1<<3)
+# define ARMV8_SHA256 (1<<4)
+# define ARMV8_PMULL (1<<5)
+# define ARMV8_SHA512 (1<<6)
+
+#endif