D2651.diff

Index: lib/libcpufeats/Makefile.inc
===================================================================
--- /dev/null
+++ lib/libcpufeats/Makefile.inc
@@ -0,0 +1,6 @@
+# $FreeBSD$
+
+CPUFEATSDIR := ${.PARSEDIR}
+CPUFEATSCFLAGS = -I${CPUFEATSDIR}
+
+CPUFEATS_SRC = cpufeats.c
Index: lib/libcpufeats/README
===================================================================
--- /dev/null
+++ lib/libcpufeats/README
@@ -0,0 +1,14 @@
+Though this appears to be a library, it is not. It is meant to be
+directly included in your code.
+
+To use this, include the Makefile.inc in your Makefile, similar to:
+.include <../libcpufeats/Makefile.inc>
+
+Add the directory to .PATH:
+.PATH: ${CPUFEATSDIR}
+
+Include the necessary CFLAGS for building:
+CFLAGS+= ${CPUFEATSCFLAGS}
+
+And include the source:
+SRCS+= $(CPUFEATS_SRC)
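
Taken together, a minimal consumer Makefile might look like the sketch below
(the program and source names are hypothetical and not part of this change):

PROG=	cpuinfo
SRCS=	main.c

.include <../libcpufeats/Makefile.inc>

.PATH:	${CPUFEATSDIR}
CFLAGS+=	${CPUFEATSCFLAGS}
SRCS+=	${CPUFEATS_SRC}

.include <bsd.prog.mk>
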
Index: lib/libcpufeats/cpufeats.h
===================================================================
--- /dev/null
+++ lib/libcpufeats/cpufeats.h
@@ -0,0 +1,15 @@
+#ifndef _CPUFEATS_H_
+#define _CPUFEATS_H_
+
+#include <sys/types.h>
+#include <machine/specialreg.h>
+
+/* from machine/md_var.h */
+extern u_int cpu_vendor_id;
+extern u_int cpu_exthigh;
+extern u_int cpu_feature;
+extern u_int cpu_feature2;
+extern u_int amd_feature;
+extern u_int amd_feature2;
+
+#endif /* _CPUFEATS_H_ */
Index: lib/libcpufeats/cpufeats.c
===================================================================
--- /dev/null
+++ lib/libcpufeats/cpufeats.c
@@ -0,0 +1,136 @@
+/*-
+ * Copyright 2015 John-Mark Gurney.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <string.h>
+
+#include <machine/cpufunc.h>
+#include <machine/cputypes.h>
+
+#include "cpufeats.h"
+
+u_int cpu_vendor_id;
+u_int cpu_exthigh;
+u_int cpu_feature;
+u_int cpu_feature2;
+u_int amd_feature;
+u_int amd_feature2;
+
+static u_int cpu_high;
+static char cpu_vendor[13];
+
+static const struct {
+ const char *vendor;
+ u_int vendor_id;
+} cpu_vendors[] = {
+ { INTEL_VENDOR_ID, CPU_VENDOR_INTEL }, /* GenuineIntel */
+ { AMD_VENDOR_ID, CPU_VENDOR_AMD }, /* AuthenticAMD */
+ { CENTAUR_VENDOR_ID, CPU_VENDOR_CENTAUR }, /* CentaurHauls */
+};
+
+static u_int
+find_cpu_vendor_id(void)
+{
+ size_t i;
+
+ for (i = 0; i < sizeof(cpu_vendors) / sizeof(cpu_vendors[0]); i++)
+ if (strcmp(cpu_vendor, cpu_vendors[i].vendor) == 0)
+ return (cpu_vendors[i].vendor_id);
+ return (0);
+}
+
+static int
+check_cpuid(void)
+{
+ int cpuid_supported;
+
+#ifdef __i386__
+ __asm __volatile(
+ " pushfl\n"
+ " popl %%eax\n"
+ " movl %%eax,%%ecx\n"
+ " xorl $0x200000,%%eax\n"
+ " pushl %%eax\n"
+ " popfl\n"
+ " pushfl\n"
+ " popl %%eax\n"
+ " xorl %%eax,%%ecx\n"
+ " je 1f\n"
+ " movl $1,%0\n"
+ " jmp 2f\n"
+ "1: movl $0,%0\n"
+ "2:\n"
+ : "=r" (cpuid_supported) : : "eax", "ecx");
+#elif defined(__amd64__)
+ cpuid_supported = 1;
+#else
+#error Unsupported architecture: only i386 and amd64 are supported
+#endif
+ return (cpuid_supported);
+}
+
+/* default priority appears to be 65535, make sure we are before others */
+static void __attribute__((constructor(1000)))
+cpu_features_init(void)
+{
+ u_int regs[4];
+
+ if (!check_cpuid())
+ return;
+
+ /* From: x86/x86/identcpu.c */
+ do_cpuid(0, regs);
+ cpu_high = regs[0];
+ /* void * to eliminate -Wcast-align */
+ ((u_int *)(void *)&cpu_vendor)[0] = regs[1];
+ ((u_int *)(void *)&cpu_vendor)[1] = regs[3];
+ ((u_int *)(void *)&cpu_vendor)[2] = regs[2];
+ cpu_vendor[12] = '\0';
+
+ cpu_vendor_id = find_cpu_vendor_id();
+
+ do_cpuid(1, regs);
+ cpu_feature = regs[3];
+ cpu_feature2 = regs[2];
+
+ if ( cpu_high > 0 && (cpu_vendor_id == CPU_VENDOR_INTEL ||
+ cpu_vendor_id == CPU_VENDOR_AMD ||
+ cpu_vendor_id == CPU_VENDOR_CENTAUR)) {
+ do_cpuid(0x80000000, regs);
+ if (regs[0] >= 0x80000000)
+ cpu_exthigh = regs[0];
+ }
+
+ if (cpu_exthigh >= 0x80000001) {
+ do_cpuid(0x80000001, regs);
+ amd_feature = regs[3] & ~(cpu_feature & 0x0183f3ff);
+ amd_feature2 = regs[2];
+ }
+}
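
For illustration, a userland consumer built with cpufeats.c can test these
variables after its constructors have run and select an implementation
accordingly; the sketch below mirrors the selection logic this patch adds to
sha256c.c (pick_impl, work_portable and work_sse are hypothetical names):

#include <machine/specialreg.h>
#include "cpufeats.h"

static void work_portable(void) { /* plain C path */ }
static void work_sse(void) { /* SSE-optimized path */ }
static void (*do_work)(void) = work_portable;

static void
pick_impl(void)
{
	/* cpu_feature/cpu_feature2 were filled in by the constructor above. */
	if ((cpu_feature & CPUID_SSE2) != 0 &&
	    (cpu_feature2 & (CPUID2_SSE3 | CPUID2_SSSE3)) ==
	    (CPUID2_SSE3 | CPUID2_SSSE3))
		do_work = work_sse;
}
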
Index: lib/libcrypt/Makefile
===================================================================
--- lib/libcrypt/Makefile
+++ lib/libcrypt/Makefile
@@ -26,6 +26,8 @@
CFLAGS+= -I${.CURDIR} -DHAS_DES -DHAS_BLOWFISH
.endif
+.include <${.CURDIR}/../libmd/Makefile.inc>
+
.for sym in MD4Init MD4Final MD4Update MD4Pad \
MD5Init MD5Final MD5Update MD5Pad \
SHA256_Init SHA256_Final SHA256_Update \
Index: lib/libmd/Makefile
===================================================================
--- lib/libmd/Makefile
+++ lib/libmd/Makefile
@@ -49,7 +49,7 @@
# * macros are used to rename symbols to libcrypt internal names
# * no weak aliases are generated
CFLAGS+= -I${.CURDIR} -DWEAK_REFS
-.PATH: ${.CURDIR}/${MACHINE_ARCH} ${.CURDIR}/../../sys/crypto/sha2
+.PATH: ${.CURDIR}/${MACHINE_ARCH}
.if exists(${MACHINE_ARCH}/sha.S)
SRCS+= sha.S
@@ -63,6 +63,8 @@
ACFLAGS+= -DELF -Wa,--noexecstack
.endif
+.include <Makefile.inc>
+
md4hl.c: mdXhl.c
(echo '#define LENGTH 16'; \
sed -e 's/mdX/md4/g' -e 's/MDX/MD4/g' ${.ALLSRC}) > ${.TARGET}
Index: lib/libmd/Makefile.inc
===================================================================
--- /dev/null
+++ lib/libmd/Makefile.inc
@@ -0,0 +1,26 @@
+# $FreeBSD$
+
+.include <../libcpufeats/Makefile.inc>
+
+.PATH: ${CPUFEATSDIR} ${.CURDIR}/../../sys/crypto/sha2
+
+CFLAGS+= ${CPUFEATSCFLAGS}
+
+.if ${MACHINE_CPUARCH} == "amd64"
+SRCS+= $(CPUFEATS_SRC)
+
+OBJS+= sha256_sse4.o
+
+sha256_sse4.o.uu: sha256_sse4.yasm
+ @echo "Please install yasm, and run make in ${.CURDIR}/../../sys/crypto/sha2."
+ @false
+
+sha256_sse4.o: sha256_sse4.o.uu
+ uudecode -o $@ < $> || rm $@
+
+sha256_sse4.po: sha256_sse4.o.uu
+ uudecode -o $@ < $> || rm $@
+
+sha256_sse4.So: sha256_sse4.o.uu
+ uudecode -o $@ < $> || rm $@
+.endif
Index: lib/libmd/mdXhl.c
===================================================================
--- lib/libmd/mdXhl.c
+++ lib/libmd/mdXhl.c
@@ -49,7 +49,7 @@
char *
MDXFileChunk(const char *filename, char *buf, off_t ofs, off_t len)
{
- unsigned char buffer[BUFSIZ];
+ unsigned char buffer[16*1024];
MDX_CTX ctx;
struct stat stbuf;
int f, i, e;
Index: lib/libmd/sha256.h
===================================================================
--- lib/libmd/sha256.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*-
- * Copyright 2005 Colin Percival
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef _SHA256_H_
-#define _SHA256_H_
-
-#include <sys/types.h>
-
-typedef struct SHA256Context {
- uint32_t state[8];
- uint64_t count;
- unsigned char buf[64];
-} SHA256_CTX;
-
-__BEGIN_DECLS
-
-/* Ensure libmd symbols do not clash with libcrypto */
-
-#ifndef SHA256_Init
-#define SHA256_Init _libmd_SHA256_Init
-#endif
-#ifndef SHA256_Update
-#define SHA256_Update _libmd_SHA256_Update
-#endif
-#ifndef SHA256_Final
-#define SHA256_Final _libmd_SHA256_Final
-#endif
-#ifndef SHA256_End
-#define SHA256_End _libmd_SHA256_End
-#endif
-#ifndef SHA256_File
-#define SHA256_File _libmd_SHA256_File
-#endif
-#ifndef SHA256_FileChunk
-#define SHA256_FileChunk _libmd_SHA256_FileChunk
-#endif
-#ifndef SHA256_Data
-#define SHA256_Data _libmd_SHA256_Data
-#endif
-
-#ifndef SHA256_Transform
-#define SHA256_Transform _libmd_SHA256_Transform
-#endif
-#ifndef SHA256_version
-#define SHA256_version _libmd_SHA256_version
-#endif
-
-void SHA256_Init(SHA256_CTX *);
-void SHA256_Update(SHA256_CTX *, const void *, size_t);
-void SHA256_Final(unsigned char [32], SHA256_CTX *);
-char *SHA256_End(SHA256_CTX *, char *);
-char *SHA256_File(const char *, char *);
-char *SHA256_FileChunk(const char *, char *, off_t, off_t);
-char *SHA256_Data(const void *, unsigned int, char *);
-__END_DECLS
-
-#endif /* !_SHA256_H_ */
Index: lib/libmd/sha256c.c
===================================================================
--- lib/libmd/sha256c.c
+++ /dev/null
@@ -1,312 +0,0 @@
-/*-
- * Copyright 2005 Colin Percival
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/endian.h>
-#include <sys/types.h>
-
-#include <string.h>
-
-#include "sha256.h"
-
-#if BYTE_ORDER == BIG_ENDIAN
-
-/* Copy a vector of big-endian uint32_t into a vector of bytes */
-#define be32enc_vect(dst, src, len) \
- memcpy((void *)dst, (const void *)src, (size_t)len)
-
-/* Copy a vector of bytes into a vector of big-endian uint32_t */
-#define be32dec_vect(dst, src, len) \
- memcpy((void *)dst, (const void *)src, (size_t)len)
-
-#else /* BYTE_ORDER != BIG_ENDIAN */
-
-/*
- * Encode a length len/4 vector of (uint32_t) into a length len vector of
- * (unsigned char) in big-endian form. Assumes len is a multiple of 4.
- */
-static void
-be32enc_vect(unsigned char *dst, const uint32_t *src, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len / 4; i++)
- be32enc(dst + i * 4, src[i]);
-}
-
-/*
- * Decode a big-endian length len vector of (unsigned char) into a length
- * len/4 vector of (uint32_t). Assumes len is a multiple of 4.
- */
-static void
-be32dec_vect(uint32_t *dst, const unsigned char *src, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len / 4; i++)
- dst[i] = be32dec(src + i * 4);
-}
-
-#endif /* BYTE_ORDER != BIG_ENDIAN */
-
-/* Elementary functions used by SHA256 */
-#define Ch(x, y, z) ((x & (y ^ z)) ^ z)
-#define Maj(x, y, z) ((x & (y | z)) | (y & z))
-#define SHR(x, n) (x >> n)
-#define ROTR(x, n) ((x >> n) | (x << (32 - n)))
-#define S0(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
-#define S1(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
-#define s0(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHR(x, 3))
-#define s1(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHR(x, 10))
-
-/* SHA256 round function */
-#define RND(a, b, c, d, e, f, g, h, k) \
- t0 = h + S1(e) + Ch(e, f, g) + k; \
- t1 = S0(a) + Maj(a, b, c); \
- d += t0; \
- h = t0 + t1;
-
-/* Adjusted round function for rotating state */
-#define RNDr(S, W, i, k) \
- RND(S[(64 - i) % 8], S[(65 - i) % 8], \
- S[(66 - i) % 8], S[(67 - i) % 8], \
- S[(68 - i) % 8], S[(69 - i) % 8], \
- S[(70 - i) % 8], S[(71 - i) % 8], \
- W[i] + k)
-
-/*
- * SHA256 block compression function. The 256-bit state is transformed via
- * the 512-bit input block to produce a new state.
- */
-static void
-SHA256_Transform(uint32_t * state, const unsigned char block[64])
-{
- uint32_t W[64];
- uint32_t S[8];
- uint32_t t0, t1;
- int i;
-
- /* 1. Prepare message schedule W. */
- be32dec_vect(W, block, 64);
- for (i = 16; i < 64; i++)
- W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
-
- /* 2. Initialize working variables. */
- memcpy(S, state, 32);
-
- /* 3. Mix. */
- RNDr(S, W, 0, 0x428a2f98);
- RNDr(S, W, 1, 0x71374491);
- RNDr(S, W, 2, 0xb5c0fbcf);
- RNDr(S, W, 3, 0xe9b5dba5);
- RNDr(S, W, 4, 0x3956c25b);
- RNDr(S, W, 5, 0x59f111f1);
- RNDr(S, W, 6, 0x923f82a4);
- RNDr(S, W, 7, 0xab1c5ed5);
- RNDr(S, W, 8, 0xd807aa98);
- RNDr(S, W, 9, 0x12835b01);
- RNDr(S, W, 10, 0x243185be);
- RNDr(S, W, 11, 0x550c7dc3);
- RNDr(S, W, 12, 0x72be5d74);
- RNDr(S, W, 13, 0x80deb1fe);
- RNDr(S, W, 14, 0x9bdc06a7);
- RNDr(S, W, 15, 0xc19bf174);
- RNDr(S, W, 16, 0xe49b69c1);
- RNDr(S, W, 17, 0xefbe4786);
- RNDr(S, W, 18, 0x0fc19dc6);
- RNDr(S, W, 19, 0x240ca1cc);
- RNDr(S, W, 20, 0x2de92c6f);
- RNDr(S, W, 21, 0x4a7484aa);
- RNDr(S, W, 22, 0x5cb0a9dc);
- RNDr(S, W, 23, 0x76f988da);
- RNDr(S, W, 24, 0x983e5152);
- RNDr(S, W, 25, 0xa831c66d);
- RNDr(S, W, 26, 0xb00327c8);
- RNDr(S, W, 27, 0xbf597fc7);
- RNDr(S, W, 28, 0xc6e00bf3);
- RNDr(S, W, 29, 0xd5a79147);
- RNDr(S, W, 30, 0x06ca6351);
- RNDr(S, W, 31, 0x14292967);
- RNDr(S, W, 32, 0x27b70a85);
- RNDr(S, W, 33, 0x2e1b2138);
- RNDr(S, W, 34, 0x4d2c6dfc);
- RNDr(S, W, 35, 0x53380d13);
- RNDr(S, W, 36, 0x650a7354);
- RNDr(S, W, 37, 0x766a0abb);
- RNDr(S, W, 38, 0x81c2c92e);
- RNDr(S, W, 39, 0x92722c85);
- RNDr(S, W, 40, 0xa2bfe8a1);
- RNDr(S, W, 41, 0xa81a664b);
- RNDr(S, W, 42, 0xc24b8b70);
- RNDr(S, W, 43, 0xc76c51a3);
- RNDr(S, W, 44, 0xd192e819);
- RNDr(S, W, 45, 0xd6990624);
- RNDr(S, W, 46, 0xf40e3585);
- RNDr(S, W, 47, 0x106aa070);
- RNDr(S, W, 48, 0x19a4c116);
- RNDr(S, W, 49, 0x1e376c08);
- RNDr(S, W, 50, 0x2748774c);
- RNDr(S, W, 51, 0x34b0bcb5);
- RNDr(S, W, 52, 0x391c0cb3);
- RNDr(S, W, 53, 0x4ed8aa4a);
- RNDr(S, W, 54, 0x5b9cca4f);
- RNDr(S, W, 55, 0x682e6ff3);
- RNDr(S, W, 56, 0x748f82ee);
- RNDr(S, W, 57, 0x78a5636f);
- RNDr(S, W, 58, 0x84c87814);
- RNDr(S, W, 59, 0x8cc70208);
- RNDr(S, W, 60, 0x90befffa);
- RNDr(S, W, 61, 0xa4506ceb);
- RNDr(S, W, 62, 0xbef9a3f7);
- RNDr(S, W, 63, 0xc67178f2);
-
- /* 4. Mix local working variables into global state */
- for (i = 0; i < 8; i++)
- state[i] += S[i];
-}
-
-static unsigned char PAD[64] = {
- 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-/* Add padding and terminating bit-count. */
-static void
-SHA256_Pad(SHA256_CTX * ctx)
-{
- unsigned char len[8];
- uint32_t r, plen;
-
- /*
- * Convert length to a vector of bytes -- we do this now rather
- * than later because the length will change after we pad.
- */
- be64enc(len, ctx->count);
-
- /* Add 1--64 bytes so that the resulting length is 56 mod 64 */
- r = (ctx->count >> 3) & 0x3f;
- plen = (r < 56) ? (56 - r) : (120 - r);
- SHA256_Update(ctx, PAD, (size_t)plen);
-
- /* Add the terminating bit-count */
- SHA256_Update(ctx, len, 8);
-}
-
-/* SHA-256 initialization. Begins a SHA-256 operation. */
-void
-SHA256_Init(SHA256_CTX * ctx)
-{
-
- /* Zero bits processed so far */
- ctx->count = 0;
-
- /* Magic initialization constants */
- ctx->state[0] = 0x6A09E667;
- ctx->state[1] = 0xBB67AE85;
- ctx->state[2] = 0x3C6EF372;
- ctx->state[3] = 0xA54FF53A;
- ctx->state[4] = 0x510E527F;
- ctx->state[5] = 0x9B05688C;
- ctx->state[6] = 0x1F83D9AB;
- ctx->state[7] = 0x5BE0CD19;
-}
-
-/* Add bytes into the hash */
-void
-SHA256_Update(SHA256_CTX * ctx, const void *in, size_t len)
-{
- uint64_t bitlen;
- uint32_t r;
- const unsigned char *src = in;
-
- /* Number of bytes left in the buffer from previous updates */
- r = (ctx->count >> 3) & 0x3f;
-
- /* Convert the length into a number of bits */
- bitlen = len << 3;
-
- /* Update number of bits */
- ctx->count += bitlen;
-
- /* Handle the case where we don't need to perform any transforms */
- if (len < 64 - r) {
- memcpy(&ctx->buf[r], src, len);
- return;
- }
-
- /* Finish the current block */
- memcpy(&ctx->buf[r], src, 64 - r);
- SHA256_Transform(ctx->state, ctx->buf);
- src += 64 - r;
- len -= 64 - r;
-
- /* Perform complete blocks */
- while (len >= 64) {
- SHA256_Transform(ctx->state, src);
- src += 64;
- len -= 64;
- }
-
- /* Copy left over data into buffer */
- memcpy(ctx->buf, src, len);
-}
-
-/*
- * SHA-256 finalization. Pads the input data, exports the hash value,
- * and clears the context state.
- */
-void
-SHA256_Final(unsigned char digest[32], SHA256_CTX * ctx)
-{
-
- /* Add padding */
- SHA256_Pad(ctx);
-
- /* Write the hash */
- be32enc_vect(digest, ctx->state, 32);
-
- /* Clear the context state */
- memset((void *)ctx, 0, sizeof(*ctx));
-}
-
-#ifdef WEAK_REFS
-/* When building libmd, provide weak references. Note: this is not
- activated in the context of compiling these sources for internal
- use in libcrypt.
- */
-#undef SHA256_Init
-__weak_reference(_libmd_SHA256_Init, SHA256_Init);
-#undef SHA256_Update
-__weak_reference(_libmd_SHA256_Update, SHA256_Update);
-#undef SHA256_Final
-__weak_reference(_libmd_SHA256_Final, SHA256_Final);
-#undef SHA256_Transform
-__weak_reference(_libmd_SHA256_Transform, SHA256_Transform);
-#endif
Index: share/mk/src.libnames.mk
===================================================================
--- share/mk/src.libnames.mk
+++ share/mk/src.libnames.mk
@@ -19,6 +19,7 @@
atf_c \
atf_cxx \
bsdstat \
+ cpufeats \
event \
heimipcc \
heimipcs \
Index: sys/conf/files.amd64
===================================================================
--- sys/conf/files.amd64
+++ sys/conf/files.amd64
@@ -152,12 +152,17 @@
no-implicit-rule \
clean "aesni_ghash.o"
aesni_wrap.o optional aesni \
- dependency "$S/crypto/aesni/aesni_wrap.c" \
+ dependency "$S/crypto/aesni/aesni_wrap.c $S/crypto/aesni/aesencdec.h" \
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes ${.IMPSRC}" \
no-implicit-rule \
clean "aesni_wrap.o"
crypto/blowfish/bf_enc.c optional crypto | ipsec
crypto/des/des_enc.c optional crypto | ipsec | netsmb
+sha256_sse4.o optional crypto | geom_bde | ipsec | random | \
+ sctp | zfs \
+ dependency "$S/crypto/sha2/sha256_sse4.o.uu" \
+ compile-with "uudecode < $S/crypto/sha2/sha256_sse4.o.uu" \
+ no-implicit-rule
crypto/via/padlock.c optional padlock
crypto/via/padlock_cipher.c optional padlock
crypto/via/padlock_hash.c optional padlock
Index: sys/crypto/sha2/Makefile
===================================================================
--- /dev/null
+++ sys/crypto/sha2/Makefile
@@ -0,0 +1,17 @@
+all: sha256_sse4.o.uu
+
+clean:
+ rm -f sha256_sse4.o.uu sha256_sse4.o
+
+YASM=yasm
+
+sha256_sse4.o: sha256_sse4.yasm
+ @if [ ! -x ${:!which ${YASM}!} ]; then \
+ echo Requires yasm to be installed; \
+ exit 1; \
+ fi
+ ${YASM} -f x64 -f elf64 -X gnu -g dwarf2 -D LINUX -o ${.TARGET} \
+ ${.ALLSRC}
+
+sha256_sse4.o.uu: sha256_sse4.o
+ b64encode -o ${.TARGET} ${.ALLSRC} ${.ALLSRC}
Index: sys/crypto/sha2/sha256.h
===================================================================
--- sys/crypto/sha2/sha256.h
+++ sys/crypto/sha2/sha256.h
@@ -40,6 +40,38 @@
} SHA256_CTX;
__BEGIN_DECLS
+
+/* Ensure libmd symbols do not clash with libcrypto */
+
+#ifndef SHA256_Init
+#define SHA256_Init _libmd_SHA256_Init
+#endif
+#ifndef SHA256_Update
+#define SHA256_Update _libmd_SHA256_Update
+#endif
+#ifndef SHA256_Final
+#define SHA256_Final _libmd_SHA256_Final
+#endif
+#ifndef SHA256_End
+#define SHA256_End _libmd_SHA256_End
+#endif
+#ifndef SHA256_File
+#define SHA256_File _libmd_SHA256_File
+#endif
+#ifndef SHA256_FileChunk
+#define SHA256_FileChunk _libmd_SHA256_FileChunk
+#endif
+#ifndef SHA256_Data
+#define SHA256_Data _libmd_SHA256_Data
+#endif
+
+#ifndef SHA256_Transform
+#define SHA256_Transform _libmd_SHA256_Transform
+#endif
+#ifndef SHA256_version
+#define SHA256_version _libmd_SHA256_version
+#endif
+
void SHA256_Init(SHA256_CTX *);
void SHA256_Update(SHA256_CTX *, const void *, size_t);
void SHA256_Final(unsigned char [32], SHA256_CTX *);
Index: sys/crypto/sha2/sha256_sse4.o.uu
===================================================================
--- /dev/null
+++ sys/crypto/sha2/sha256_sse4.o.uu
@@ -0,0 +1,120 @@
+begin-base64 644 sha256_sse4.o
+f0VMRgIBAQAAAAAAAAAAAAEAPgABAAAAAAAAAAAAAAAAAAAAAAAAAMAWAAAAAAAAAAAAAEAAAAAA
+AEAADgABAFNVQVVBVkFXSIPsIEjB4gYPhNYMAABIAfpIiRQkiwaLXgSLTghEi0YMi1YQRItOFESL
+VhhEi14cZkQPbyUAAAAAZkQPbxUAAAAAZkQPbx0AAAAASI0tAAAAAPMPbydmQQ84AOTzD29vEGZB
+DzgA7PMPb3cgZkEPOAD08w9vfzBmQQ84APxIiXwkCEjHxwMAAABmZmZmZmYuDx+EAAAAAABmRA9v
+TQBmRA/+zGZED39MJBBmD2/HQYnVQcHNDkGJxmYPOg/GBEHBzglBMdVFic9Bwc0FZg9vzUExxkUx
+12YP/sRBMdVBIddBwc4LZg86D8wEQTHGQcHNBkUx12YPb9FBwc4CRQHvRAN8JBBmD2/ZQYnFRQH7
+QYnHZg9y8RlBCc1FAdhBIc9mD3LSB0Eh3UUB82YP68pFCf1FAetmD2/TRYnFRYneZkQPb8NBwc0O
+RTHFQYnXQcHOCWYPcvMORTHeQcHNBUUxz2YPctISQcHOC0UxxUUhx0HBzQZmD+/LRTHeRTHPZkEP
+ctADRQHvRAN8JBRBwc4CZg/vykWJ3UUB+kWJ32ZBD+/IQQndRAHRQSHfZg9w1/pBIcVFAfJmD/7B
+RQn9RQHqZg9v2kGJzUWJ1kHBzQ5mRA9vwkExzUHBzglFicdFMdZBwc0FZg9z0hFBMddmD3PTE0Ex
+zUEhz2ZBD3LQCkHBzgtFMdZBMddBwc0GZg/v00UB70HBzgJEA3wkGGZED+/CRYnVRQH5RYnXZkUP
+OADCQQnFRAHLQSHHZkEP/sBFId1FAfFmD3DQUEUJ/UUB6WYPb9pBid1Bwc0ORYnOZg9v4kHBzglB
+Md1Bic9Bwc0FZg9z0hFFMc5FMcdmD3PTE0Ex3UEh30HBzgtmD3LUCkUxzkHBzQZFMcdmD+/TQcHO
+AkUB70QDfCQcZg/v4kWJzUQB+kWJz2ZBDzgA40UJ3QHQRSHfZg/+4EUh1UQB8kUJ/UQB6mZED29N
+EGZED/7NZkQPf0wkEGYPb8RBicVBwc0OQYnWZg86D8cEQcHOCUExxUGJ30HBzQVmD2/OQTHWQTHP
+Zg/+xUExxUEhx0HBzgtmDzoPzQRBMdZBwc0GQTHPZg9v0UHBzgJFAe9EA3wkEGYPb9lBidVFAfhB
+iddmD3LxGUUJ1UUBw0Uh12YPctIHRSHNRQHwZg/rykUJ/UUB6GYPb9NFid1FicZmRA9vw0HBzQ5F
+Md1BicdBwc4JZg9y8w5FMcZBwc0FQTHfZg9y0hJBwc4LRTHdRSHfQcHNBmYP78tFMcZBMd9mQQ9y
+0ANFAe9EA3wkFEHBzgJmD+/KRYnFRAH5RYnHZkEP78hFCc1BAcpFIc9mD3DU+kEh1UQB8WYP/sFF
+Cf1EAelmD2/aRYnVQYnOQcHNDmZED2/CRTHVQcHOCUWJ30ExzkHBzQVmD3PSEUExx2YPc9MTRTHV
+RSHXZkEPctAKQcHOC0ExzkExx0HBzQZmD+/TRQHvQcHOAkQDfCQYZkQP78JBic1EAftBic9mRQ84
+AMJBCdVBAdlBIddmQQ/+wEUhxUQB82YPcNBQRQn9RAHrZg9v2kWJzUHBzQ5Bid5mD2/qQcHOCUUx
+zUWJ10HBzQVmD3PSEUEx3kUx32YPc9MTRTHNRSHPQcHOC2YPctUKQTHeQcHNBkUx32YP79NBwc4C
+RQHvRAN8JBxmD+/qQYndRAH4QYnfZkEPOADrRQnFAcJFIcdmD/7oQSHNRAHwRQn9RAHoZkQPb00g
+ZkQP/s5mRA9/TCQQZg9vxUGJ1UHBzQ5BicZmDzoPxARBwc4JQTHVRYnPQcHNBWYPb89BMcZFMddm
+D/7GQTHVQSHXQcHOC2YPOg/OBEExxkHBzQZFMddmD2/RQcHOAkUB70QDfCQQZg9v2UGJxUUB+0GJ
+x2YPcvEZQQnNRQHYQSHPZg9y0gdBId1FAfNmD+vKRQn9RQHrZg9v00WJxUWJ3mZED2/DQcHNDkUx
+xUGJ10HBzglmD3LzDkUx3kHBzQVFMc9mD3LSEkHBzgtFMcVFIcdBwc0GZg/vy0Ux3kUxz2ZBD3LQ
+A0UB70QDfCQUQcHOAmYP78pFid1FAfpFid9mQQ/vyEEJ3UQB0UEh32YPcNX6QSHFRQHyZg/+wUUJ
+/UUB6mYPb9pBic1FidZBwc0OZkQPb8JBMc1Bwc4JRYnHRTHWQcHNBWYPc9IRQTHXZg9z0xNBMc1B
+Ic9mQQ9y0ApBwc4LRTHWQTHXQcHNBmYP79NFAe9Bwc4CRAN8JBhmRA/vwkWJ1UUB+UWJ12ZFDzgA
+wkEJxUQBy0Ehx2ZBD/7ARSHdRQHxZg9w0FBFCf1FAelmD2/aQYndQcHNDkWJzmYPb/JBwc4JQTHd
+QYnPQcHNBWYPc9IRRTHORTHHZg9z0xNBMd1BId9Bwc4LZg9y1gpFMc5Bwc0GRTHHZg/v00HBzgJF
+Ae9EA3wkHGYP7/JFic1EAfpFic9mQQ84APNFCd0B0EUh32YP/vBFIdVEAfJFCf1EAepmRA9vTTBm
+RA/+z2ZED39MJBBIg8VAZg9vxkGJxUHBzQ5BidZmDzoPxQRBwc4JQTHFQYnfQcHNBWYPb8xBMdZB
+Mc9mD/7HQTHFQSHHQcHOC2YPOg/PBEEx1kHBzQZBMc9mD2/RQcHOAkUB70QDfCQQZg9v2UGJ1UUB
++EGJ12YPcvEZRQnVRQHDRSHXZg9y0gdFIc1FAfBmD+vKRQn9RQHoZg9v00WJ3UWJxmZED2/DQcHN
+DkUx3UGJx0HBzglmD3LzDkUxxkHBzQVBMd9mD3LSEkHBzgtFMd1FId9Bwc0GZg/vy0UxxkEx32ZB
+D3LQA0UB70QDfCQUQcHOAmYP78pFicVEAflFicdmQQ/vyEUJzUEBykUhz2YPcNb6QSHVRAHxZg/+
+wUUJ/UQB6WYPb9pFidVBic5Bwc0OZkQPb8JFMdVBwc4JRYnfQTHOQcHNBWYPc9IRQTHHZg9z0xNF
+MdVFIddmQQ9y0ApBwc4LQTHOQTHHQcHNBmYP79NFAe9Bwc4CRAN8JBhmRA/vwkGJzUQB+0GJz2ZF
+DzgAwkEJ1UEB2UEh12ZBD/7ARSHFRAHzZg9w0FBFCf1EAetmD2/aRYnNQcHNDkGJ3mYPb/pBwc4J
+RTHNRYnXQcHNBWYPc9IRQTHeRTHfZg9z0xNFMc1FIc9Bwc4LZg9y1wpBMd5Bwc0GRTHfZg/v00HB
+zgJFAe9EA3wkHGYP7/pBid1EAfhBid9mQQ84APtFCcUBwkUhx2YP/vhBIc1EAfBFCf1EAehIg+8B
+D4UO9///SMfHAgAAAGYP/mUAZg9/ZCQQQYnVQcHNDkGJxkEx1UHBzglFic9BMcZBwc0FRTHXQTHV
+QcHOC0Eh10ExxkHBzQZFMddFAe9Bwc4CRAN8JBBBicVFAftBicdBCc1FAdhBIc9BId1FAfNFCf1F
+AetFicVBwc0ORYneRTHFQcHOCUGJ10Ux3kHBzQVFMc9FMcVBwc4LRSHHRTHeQcHNBkUxz0UB70HB
+zgJEA3wkFEWJ3UUB+kWJ30EJ3UQB0UEh30EhxUUB8kUJ/UUB6kGJzUHBzQ5FidZBMc1Bwc4JRYnH
+RTHWQcHNBUEx10ExzUHBzgtBIc9FMdZBwc0GQTHXRQHvQcHOAkQDfCQYRYnVRQH5RYnXQQnFRAHL
+QSHHRSHdRQHxRQn9RQHpQYndQcHNDkWJzkEx3UHBzglBic9FMc5Bwc0FRTHHQTHdQcHOC0Eh30Ux
+zkHBzQZFMcdFAe9Bwc4CRAN8JBxFic1EAfpFic9FCd0B0EUh30Uh1UQB8kUJ/UQB6mYP/m0QZg9/
+bCQQSIPFIEGJxUHBzQ5BidZBMcVBwc4JQYnfQTHWQcHNBUExz0ExxUHBzgtBIcdBMdZBwc0GQTHP
+RQHvQcHOAkQDfCQQQYnVRQH4QYnXRQnVRQHDRSHXRSHNRQHwRQn9RQHoRYndQcHNDkWJxkUx3UHB
+zglBicdFMcZBwc0FQTHfRTHdQcHOC0Uh30UxxkHBzQZBMd9FAe9Bwc4CRAN8JBRFicVEAflFicdF
+Cc1BAcpFIc9BIdVEAfFFCf1EAelFidVBwc0OQYnORTHVQcHOCUWJ30ExzkHBzQVBMcdFMdVBwc4L
+RSHXQTHOQcHNBkExx0UB70HBzgJEA3wkGEGJzUQB+0GJz0EJ1UEB2UEh10UhxUQB80UJ/UQB60WJ
+zUHBzQ5Bid5FMc1Bwc4JRYnXQTHeQcHNBUUx30UxzUHBzgtFIc9BMd5Bwc0GRTHfRQHvQcHOAkQD
+fCQcQYndRAH4QYnfRQnFAcJFIcdBIc1EAfBFCf1EAehmD2/mZg9v70iD7wEPhfb8//8DBokGA14E
+iV4EA04IiU4IRANGDESJRgwDVhCJVhBEA04URIlOFEQDVhhEiVYYRANeHESJXhxIi3wkCEiDx0BI
+OzwkD4Vn8///SIPEIEFfQV5BXV1bwwAAAD0AAAAAAAAAAgAAAAYAAAD8AAAAAAAAAEYAAAAAAAAA
+AgAAAAYAAAAMAQAAAAAAAE8AAAAAAAAAAgAAAAYAAAAcAQAAAAAAAFYAAAAAAAAAAgAAAAYAAAD8
+/////////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJgvikKRRDdxz/vAtaXb
+telbwlY58RHxWaSCP5LVXhyrmKoH2AFbgxK+hTEkw30MVXRdvnL+sd6Apwbcm3Txm8HBaZvkhke+
+78adwQ/MoQwkbyzpLaqEdErcqbBc2oj5dlJRPphtxjGoyCcDsMd/Wb/zC+DGR5Gn1VFjygZnKSkU
+hQq3JzghGy78bSxNEw04U1RzCmW7Cmp2LsnCgYUscpKh6L+iS2YaqHCLS8KjUWzHGeiS0SQGmdaF
+NQ70cKBqEBbBpBkIbDceTHdIJ7W8sDSzDBw5SqrYTk/KnFvzby5o7oKPdG9jpXgUeMiECALHjPr/
+vpDrbFCk96P5vvJ4ccYDAgEABwYFBAsKCQgPDg0MAAECAwgJCgv/////////////////////AAEC
+AwgJCgvVAwAAAgAnAAAAAQH7Dg0AAQEBAQAAAAEAAAEAc2hhMjU2X3NzZTQueWFzbQAAAAAAAAkC
+AAAAAAAAAAAD+AIBJSEvLzADC0pLZz1NLz09Sz1LS0yRkZN3SmdYZ1hnWGhbdeZnWXVKPEo8Zko8
+PEpKPDxKPDxKZjxKPEpKPFhKPDw8WDw8PFg8PEo8PEo8PFhKPDxKWDxKPFhKPDxKSjw8ZjxYSko8
+PDxYPDw8WDw8Sjw8Sjw8Slg8Sjw8Slg8WDw8Zko8PEpKPEpYWDw8PGY8PDxYPDxYPDxKPEo8Sko8
+PEpYPDxYPDxKWDxKPEpKPFhKPDw8ZjwuPEo8PDw+Z1l1SjxKPGZKPDxKSjw8Sjw8SmY8SjxKSjxY
+Sjw8PFg8PDxYPDxKPDxKPDxYSjw8Slg8SjxYSjw8Sko8PGY8WEpKPDw8WDw8PFg8PEo8PEo8PEpY
+PEo8PEpYPFg8PGZKPDxKSjxKWFg8PDxmPDw8WDw8WDw8SjxKPEpKPDxKWDw8WDw8Slg8SjxKSjxY
+Sjw8PGY8LjxKPDw8PmdZdUo8SjxmSjw8Sko8PEo8PEpmPEo8Sko8WEo8PDxYPDw8WDw8Sjw8Sjw8
+WEo8PEpYPEo8WEo8PEpKPDxmPFhKSjw8PFg8PDxYPDxKPDxKPDxKWDxKPDxKWDxYPDxmSjw8Sko8
+SlhYPDw8Zjw8PFg8PFg8PEo8SjxKSjw8Slg8PFg8PEpYPEo8Sko8WEo8PDxmPC48Sjw8PD5nWXVL
+SjxKPGZKPDxKSjw8Sjw8SmY8SjxKSjxYSjw8PFg8PDxYPDxKPDxKPDxYSjw8Slg8SjxYSjw8Sko8
+PGY8WEpKPDw8WDw8PFg8PEo8PEo8PEpYPEo8PEpYPFg8PGZKPDxKSjxKWFg8PDxmPDw8WDw8WDw8
+SjxKPEpKPDxKWDw8WDw8Slg8SjxKSjxYSjw8PGY8LjxKPDw8PktodllnPEo8PEo8PEo8PEo8PEo8
+PEpYPDw8PDw8PDw8PTxKPDxKPDxKPDxKPDxKPDxKWDw8PDw8PDw8PD08Sjw8Sjw8Sjw8Sjw8Sjw8
+Slg8PDw8PDw8PDw9PEo8PEo8PEo8PEo8PEo8PEpYPDw8PC48PDw8PVlnSzxKPDxKPDxKPDxKPDxK
+PDxKWDw8PDw8PDw8PD08Sjw8Sjw8Sjw8Sjw8Sjw8Slg8PDw8PDw8PDw9PEo8PEo8PEo8PEo8PEo8
+PEpYPDw8PDw8PDw8PTxKPDxKPDxKPDxKPDxKPDxKWDw8PDwuPDw8PD5LTEtoLi88PTw9Sks8PUpL
+SktKTFlLSwMNZkwvLy8lIgIBAAEBAAAANAAAAAAAAAABAAAADwAAAAAAAAAAAAAAAREAEAYRARIB
+AwgbCCUIEwUAAABSAAAAAgAAAAAACAEAAAAAAAAAAAAAAAAAAAAAAAAAAHNoYTI1Nl9zc2U0Lnlh
+c20AL2Evc3JjL3N5cy9jcnlwdG8vc2hhMi8AeWFzbSAxLjIuMAABgAAABgAAAAAAAAAKAAAABAAA
+AAAAAAAAAAAADAAAAAAAAAAKAAAABQAAAAAAAAAAAAAAEAAAAAAAAAABAAAADwAAAAAAAAAAAAAA
+GAAAAAAAAAABAAAADwAAAPkMAAAAAAAALAAAAAIAAAAAAAgAAAAAAAAAAAAAAAAA+QwAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAABgAAAAAAAAAKAAAAAwAAAAAAAAAAAAAAEAAAAAAAAAABAAAADwAAAAAA
+AAAAAAAAAC50ZXh0AC5kYXRhAC5kZWJ1Z19saW5lAC5kZWJ1Z19hYmJyZXYALmRlYnVnX2luZm8A
+LmRlYnVnX2FyYW5nZXMALnJlbGEudGV4dAAucmVsYS5kZWJ1Z19saW5lAC5yZWxhLmRlYnVnX2lu
+Zm8ALnJlbGEuZGVidWdfYXJhbmdlcwAuc3RydGFiAC5zeW10YWIALnNoc3RydGFiAAAAAABzaGEy
+NTZfc3NlNC55YXNtAHNoYTI1Nl9zc2U0AGRvbmVfaGFzaABQU0hVRkZMRV9CWVRFX0ZMSVBfTUFT
+SwBfU0hVRl8wMEJBAF9TSFVGX0RDMDAAbG9vcDAASzI1NgBsb29wMQBsb29wMgAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAABAAAABADx/wAAAAAAAAAAAAAAAAAAAAAAAAAAAwAMAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAwAKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAJAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAGAAAAAAAAAAAAAAAAAAAAAABnAAAAAAAEAJkJ
+AAAAAAAAAAAAAAAAAABhAAAAAAAEAKAAAAAAAAAAAAAAAAAAAABcAAAAAAAGAAAAAAAAAAAAAAAA
+AAAAAABWAAAAAAAEAFMAAAAAAAAAAAAAAAAAAABLAAAAAAAGACABAAAAAAAAAAAAAAAAAABAAAAA
+AAAGABABAAAAAAAAAAAAAAAAAAAoAAAAAAAGAAABAAAAAAAAAAAAAAAAAAAeAAAAAAAEAOwMAAAA
+AAAAAAAAAAAAAAAAAAAAAwAEAAAAAAAAAAAAAAAAAAAAAAASAAAAEAAEAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAACTAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAQFAAAAAAAAJ0AAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgwAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAsBQAAAAAAABt
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIsAAAACAAAAAAAAAAAAAAAAAAAAAAAAACAV
+AAAAAAAAmAEAAAAAAAACAAAAEAAAAAgAAAAAAAAAGAAAAAAAAAABAAAAAQAAAAYAAAAAAAAAAAAA
+AAAAAABAAAAAAAAAAPkMAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAQgAAAAQAAAAAAAAA
+AAAAAAAAAAAAAAAAPA0AAAAAAABgAAAAAAAAAAMAAAAEAAAACAAAAAAAAAAYAAAAAAAAAAcAAAAB
+AAAAAwAAAAAAAAAAAAAAAAAAAMANAAAAAAAAMAEAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAA
+AAANAAAAAQAAAAAAAAAAAAAAAAAAAAAAAADwDgAAAAAAANkDAAAAAAAAAAAAAAAAAAABAAAAAAAA
+AAAAAAAAAAAATQAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAzBIAAAAAAAAYAAAAAAAAAAMAAAAHAAAA
+CAAAAAAAAAAYAAAAAAAAABkAAAABAAAAAAAAAAAAAAAAAAAAAAAAAOQSAAAAAAAAFAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAnAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAD4EgAAAAAAAFYA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAXgAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAUBMA
+AAAAAABgAAAAAAAAAAMAAAAKAAAACAAAAAAAAAAYAAAAAAAAADMAAAABAAAAAAAAAAAAAAAAAAAA
+AAAAALATAAAAAAAAMAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAABvAAAABAAAAAAAAAAA
+AAAAAAAAAAAAAADgEwAAAAAAADAAAAAAAAAAAwAAAAwAAAAIAAAAAAAAABgAAAAAAAAA
+====
Index: sys/crypto/sha2/sha256_sse4.yasm
===================================================================
--- /dev/null
+++ sys/crypto/sha2/sha256_sse4.yasm
@@ -0,0 +1,544 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Copyright (c) 2012, Intel Corporation
+;
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are
+; met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+;
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in the
+; documentation and/or other materials provided with the
+; distribution.
+;
+; * Neither the name of the Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived from
+; this software without specific prior written permission.
+;
+;
+; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
+; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
+; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+; Example YASM command lines:
+; Windows: yasm -Xvc -f x64 -rnasm -pnasm -o sha256_sse4.obj -g cv8 sha256_sse4.asm
+; Linux: yasm -f x64 -f elf64 -X gnu -g dwarf2 -D LINUX -o sha256_sse4.o sha256_sse4.asm
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+; This code is described in an Intel White-Paper:
+; "Fast SHA-256 Implementations on Intel Architecture Processors"
+;
+; To find it, surf to http://www.intel.com/p/en_US/embedded
+; and search for that title.
+; The paper is expected to be released roughly at the end of April, 2012
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; This code schedules 1 block at a time, with 4 lanes per block
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define MOVDQ movdqu ;; assume buffers not aligned
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros
+
+; addm [mem], reg
+; Add reg to mem using reg-mem add and store
+%macro addm 2
+ add %2, %1
+ mov %1, %2
+%endm
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask
+; Load xmm with mem and byte swap each dword
+%macro COPY_XMM_AND_BSWAP 3
+ MOVDQ %1, %2
+ pshufb %1, %3
+%endmacro
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+%define X0 xmm4
+%define X1 xmm5
+%define X2 xmm6
+%define X3 xmm7
+
+%define XTMP0 xmm0
+%define XTMP1 xmm1
+%define XTMP2 xmm2
+%define XTMP3 xmm3
+%define XTMP4 xmm8
+%define XFER xmm9
+
+%define SHUF_00BA xmm10 ; shuffle xBxA -> 00BA
+%define SHUF_DC00 xmm11 ; shuffle xDxC -> DC00
+%define BYTE_FLIP_MASK xmm12
+
+%ifdef LINUX
+%define NUM_BLKS rdx ; 3rd arg
+%define CTX rsi ; 2nd arg
+%define INP rdi ; 1st arg
+
+%define SRND rdi ; clobbers INP
+%define c ecx
+%define d r8d
+%define e edx
+%else
+%define NUM_BLKS r8 ; 3rd arg
+%define CTX rdx ; 2nd arg
+%define INP rcx ; 1st arg
+
+%define SRND rcx ; clobbers INP
+%define c edi
+%define d esi
+%define e r8d
+
+%endif
+%define TBL rbp
+%define a eax
+%define b ebx
+
+%define f r9d
+%define g r10d
+%define h r11d
+
+%define y0 r13d
+%define y1 r14d
+%define y2 r15d
+
+
+
+_INP_END_SIZE equ 8
+_INP_SIZE equ 8
+_XFER_SIZE equ 8
+%ifdef LINUX
+_XMM_SAVE_SIZE equ 0
+%else
+_XMM_SAVE_SIZE equ 7*16
+%endif
+; STACK_SIZE plus pushes must be an odd multiple of 8
+_ALIGN_SIZE equ 8
+
+_INP_END equ 0
+_INP equ _INP_END + _INP_END_SIZE
+_XFER equ _INP + _INP_SIZE
+_XMM_SAVE equ _XFER + _XFER_SIZE + _ALIGN_SIZE
+STACK_SIZE equ _XMM_SAVE + _XMM_SAVE_SIZE
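+
+; Worked example (LINUX case, for illustration): _INP_END_SIZE + _INP_SIZE +
+; _XFER_SIZE + _ALIGN_SIZE = 8 + 8 + 8 + 8 = 32 and _XMM_SAVE_SIZE = 0, so
+; STACK_SIZE = 32.  Adding the five register pushes (5*8 = 40) gives 72, an
+; odd multiple of 8; with the 8-byte return address the frame totals 80, so
+; rsp stays 16-byte aligned and the movdqa accesses to [rsp + _XFER] are safe.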
+
+; rotate_Xs
+; Rotate values of symbols X0...X3
+%macro rotate_Xs 0
+%xdefine X_ X0
+%xdefine X0 X1
+%xdefine X1 X2
+%xdefine X2 X3
+%xdefine X3 X_
+%endm
+
+; ROTATE_ARGS
+; Rotate values of symbols a...h
+%macro ROTATE_ARGS 0
+%xdefine TMP_ h
+%xdefine h g
+%xdefine g f
+%xdefine f e
+%xdefine e d
+%xdefine d c
+%xdefine c b
+%xdefine b a
+%xdefine a TMP_
+%endm
+
+%macro FOUR_ROUNDS_AND_SCHED 0
+ ;; compute s0 four at a time and s1 two at a time
+ ;; compute W[-16] + W[-7] 4 at a time
+ movdqa XTMP0, X3
+ mov y0, e ; y0 = e
+ ror y0, (25-11) ; y0 = e >> (25-11)
+ mov y1, a ; y1 = a
+ palignr XTMP0, X2, 4 ; XTMP0 = W[-7]
+ ror y1, (22-13) ; y1 = a >> (22-13)
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ mov y2, f ; y2 = f
+ ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+ movdqa XTMP1, X1
+ xor y1, a ; y1 = a ^ (a >> (22-13)
+ xor y2, g ; y2 = f^g
+ paddd XTMP0, X0 ; XTMP0 = W[-7] + W[-16]
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ and y2, e ; y2 = (f^g)&e
+ ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+ ;; compute s0
+ palignr XTMP1, X0, 4 ; XTMP1 = W[-15]
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+ ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ movdqa XTMP2, XTMP1 ; XTMP2 = W[-15]
+ ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+ add y2, y0 ; y2 = S1 + CH
+ add y2, [rsp + _XFER + 0*4] ; y2 = k + w + S1 + CH
+ movdqa XTMP3, XTMP1 ; XTMP3 = W[-15]
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ pslld XTMP1, (32-7)
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+ psrld XTMP2, 7
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+ por XTMP1, XTMP2 ; XTMP1 = W[-15] ror 7
+ or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+
+ROTATE_ARGS
+ movdqa XTMP2, XTMP3 ; XTMP2 = W[-15]
+ mov y0, e ; y0 = e
+ mov y1, a ; y1 = a
+ movdqa XTMP4, XTMP3 ; XTMP4 = W[-15]
+ ror y0, (25-11) ; y0 = e >> (25-11)
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ mov y2, f ; y2 = f
+ ror y1, (22-13) ; y1 = a >> (22-13)
+ pslld XTMP3, (32-18)
+ xor y1, a ; y1 = a ^ (a >> (22-13)
+ ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+ xor y2, g ; y2 = f^g
+ psrld XTMP2, 18
+ ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ and y2, e ; y2 = (f^g)&e
+ ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
+ pxor XTMP1, XTMP3
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ psrld XTMP4, 3 ; XTMP4 = W[-15] >> 3
+ add y2, y0 ; y2 = S1 + CH
+ add y2, [rsp + _XFER + 1*4] ; y2 = k + w + S1 + CH
+ ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+ pxor XTMP1, XTMP2 ; XTMP1 = W[-15] ror 7 ^ W[-15] ror 18
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ pxor XTMP1, XTMP4 ; XTMP1 = s0
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+ ;; compute low s1
+ pshufd XTMP2, X3, 11111010b ; XTMP2 = W[-2] {BBAA}
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+ paddd XTMP0, XTMP1 ; XTMP0 = W[-16] + W[-7] + s0
+ or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+
+ROTATE_ARGS
+ movdqa XTMP3, XTMP2 ; XTMP3 = W[-2] {BBAA}
+ mov y0, e ; y0 = e
+ mov y1, a ; y1 = a
+ ror y0, (25-11) ; y0 = e >> (25-11)
+ movdqa XTMP4, XTMP2 ; XTMP4 = W[-2] {BBAA}
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ ror y1, (22-13) ; y1 = a >> (22-13)
+ mov y2, f ; y2 = f
+ xor y1, a ; y1 = a ^ (a >> (22-13)
+ ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+ psrlq XTMP2, 17 ; XTMP2 = W[-2] ror 17 {xBxA}
+ xor y2, g ; y2 = f^g
+ psrlq XTMP3, 19 ; XTMP3 = W[-2] ror 19 {xBxA}
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ and y2, e ; y2 = (f^g)&e
+ psrld XTMP4, 10 ; XTMP4 = W[-2] >> 10 {BBAA}
+ ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
+ pxor XTMP2, XTMP3
+ add y2, y0 ; y2 = S1 + CH
+ ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+ add y2, [rsp + _XFER + 2*4] ; y2 = k + w + S1 + CH
+ pxor XTMP4, XTMP2 ; XTMP4 = s1 {xBxA}
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ pshufb XTMP4, SHUF_00BA ; XTMP4 = s1 {00BA}
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+ paddd XTMP0, XTMP4 ; XTMP0 = {..., ..., W[1], W[0]}
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+ ;; compute high s1
+ pshufd XTMP2, XTMP0, 01010000b ; XTMP2 = W[-2] {DDCC}
+ or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+
+ROTATE_ARGS
+ movdqa XTMP3, XTMP2 ; XTMP3 = W[-2] {DDCC}
+ mov y0, e ; y0 = e
+ ror y0, (25-11) ; y0 = e >> (25-11)
+ mov y1, a ; y1 = a
+ movdqa X0, XTMP2 ; X0 = W[-2] {DDCC}
+ ror y1, (22-13) ; y1 = a >> (22-13)
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ mov y2, f ; y2 = f
+ ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+ psrlq XTMP2, 17 ; XTMP2 = W[-2] ror 17 {xDxC}
+ xor y1, a ; y1 = a ^ (a >> (22-13)
+ xor y2, g ; y2 = f^g
+ psrlq XTMP3, 19 ; XTMP3 = W[-2] ror 19 {xDxC}
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ and y2, e ; y2 = (f^g)&e
+ ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+ psrld X0, 10 ; X0 = W[-2] >> 10 {DDCC}
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+ ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ pxor XTMP2, XTMP3
+ ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+ add y2, y0 ; y2 = S1 + CH
+ add y2, [rsp + _XFER + 3*4] ; y2 = k + w + S1 + CH
+ pxor X0, XTMP2 ; X0 = s1 {xDxC}
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ pshufb X0, SHUF_DC00 ; X0 = s1 {DC00}
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+ paddd X0, XTMP0 ; X0 = {W[3], W[2], W[1], W[0]}
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+ or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+
+ROTATE_ARGS
+rotate_Xs
+%endm
+
+;; input is [rsp + _XFER + %1 * 4]
+%macro DO_ROUND 1
+ mov y0, e ; y0 = e
+ ror y0, (25-11) ; y0 = e >> (25-11)
+ mov y1, a ; y1 = a
+ xor y0, e ; y0 = e ^ (e >> (25-11))
+ ror y1, (22-13) ; y1 = a >> (22-13)
+ mov y2, f ; y2 = f
+ xor y1, a ; y1 = a ^ (a >> (22-13)
+ ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6))
+ xor y2, g ; y2 = f^g
+ xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6))
+ ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2))
+ and y2, e ; y2 = (f^g)&e
+ xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2))
+ ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25)
+ xor y2, g ; y2 = CH = ((f^g)&e)^g
+ add y2, y0 ; y2 = S1 + CH
+ ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22)
+ add y2, [rsp + _XFER + %1 * 4] ; y2 = k + w + S1 + CH
+ mov y0, a ; y0 = a
+ add h, y2 ; h = h + S1 + CH + k + w
+ mov y2, a ; y2 = a
+ or y0, c ; y0 = a|c
+ add d, h ; d = d + h + S1 + CH + k + w
+ and y2, c ; y2 = a&c
+ and y0, b ; y0 = (a|c)&b
+ add h, y1 ; h = h + S1 + CH + k + w + S0
+ or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c)
+ add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ
+ ROTATE_ARGS
+%endm
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; void sha256_sse4(void *input_data, UINT32 digest[8], UINT64 num_blks)
+;; arg 1 : pointer to input data
+;; arg 2 : pointer to digest
+;; arg 3 : Num blocks
+section .text
+global sha256_sse4
+align 32
+sha256_sse4:
+ push rbx
+%ifndef LINUX
+ push rsi
+ push rdi
+%endif
+ push rbp
+ push r13
+ push r14
+ push r15
+
+ sub rsp,STACK_SIZE
+%ifndef LINUX
+ movdqa [rsp + _XMM_SAVE + 0*16],xmm6
+ movdqa [rsp + _XMM_SAVE + 1*16],xmm7
+ movdqa [rsp + _XMM_SAVE + 2*16],xmm8
+ movdqa [rsp + _XMM_SAVE + 3*16],xmm9
+ movdqa [rsp + _XMM_SAVE + 4*16],xmm10
+ movdqa [rsp + _XMM_SAVE + 5*16],xmm11
+ movdqa [rsp + _XMM_SAVE + 6*16],xmm12
+%endif
+
+ shl NUM_BLKS, 6 ; convert to bytes
+ jz done_hash
+ add NUM_BLKS, INP ; pointer to end of data
+ mov [rsp + _INP_END], NUM_BLKS
+
+ ;; load initial digest
+ mov a,[4*0 + CTX]
+ mov b,[4*1 + CTX]
+ mov c,[4*2 + CTX]
+ mov d,[4*3 + CTX]
+ mov e,[4*4 + CTX]
+ mov f,[4*5 + CTX]
+ mov g,[4*6 + CTX]
+ mov h,[4*7 + CTX]
+
+ movdqa BYTE_FLIP_MASK, [PSHUFFLE_BYTE_FLIP_MASK wrt rip]
+ movdqa SHUF_00BA, [_SHUF_00BA wrt rip]
+ movdqa SHUF_DC00, [_SHUF_DC00 wrt rip]
+
+loop0:
+ lea TBL,[K256 wrt rip]
+
+ ;; byte swap first 16 dwords
+ COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK
+ COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK
+
+ mov [rsp + _INP], INP
+
+ ;; schedule 48 input dwords, by doing 3 rounds of 16 each
+ mov SRND, 3
+align 16
+loop1:
+ movdqa XFER, [TBL + 0*16]
+ paddd XFER, X0
+ movdqa [rsp + _XFER], XFER
+ FOUR_ROUNDS_AND_SCHED
+
+ movdqa XFER, [TBL + 1*16]
+ paddd XFER, X0
+ movdqa [rsp + _XFER], XFER
+ FOUR_ROUNDS_AND_SCHED
+
+ movdqa XFER, [TBL + 2*16]
+ paddd XFER, X0
+ movdqa [rsp + _XFER], XFER
+ FOUR_ROUNDS_AND_SCHED
+
+ movdqa XFER, [TBL + 3*16]
+ paddd XFER, X0
+ movdqa [rsp + _XFER], XFER
+ add TBL, 4*16
+ FOUR_ROUNDS_AND_SCHED
+
+ sub SRND, 1
+ jne loop1
+
+ mov SRND, 2
+loop2:
+ paddd X0, [TBL + 0*16]
+ movdqa [rsp + _XFER], X0
+ DO_ROUND 0
+ DO_ROUND 1
+ DO_ROUND 2
+ DO_ROUND 3
+ paddd X1, [TBL + 1*16]
+ movdqa [rsp + _XFER], X1
+ add TBL, 2*16
+ DO_ROUND 0
+ DO_ROUND 1
+ DO_ROUND 2
+ DO_ROUND 3
+
+ movdqa X0, X2
+ movdqa X1, X3
+
+ sub SRND, 1
+ jne loop2
+
+ addm [4*0 + CTX],a
+ addm [4*1 + CTX],b
+ addm [4*2 + CTX],c
+ addm [4*3 + CTX],d
+ addm [4*4 + CTX],e
+ addm [4*5 + CTX],f
+ addm [4*6 + CTX],g
+ addm [4*7 + CTX],h
+
+ mov INP, [rsp + _INP]
+ add INP, 64
+ cmp INP, [rsp + _INP_END]
+ jne loop0
+
+done_hash:
+%ifndef LINUX
+ movdqa xmm6,[rsp + _XMM_SAVE + 0*16]
+ movdqa xmm7,[rsp + _XMM_SAVE + 1*16]
+ movdqa xmm8,[rsp + _XMM_SAVE + 2*16]
+ movdqa xmm9,[rsp + _XMM_SAVE + 3*16]
+ movdqa xmm10,[rsp + _XMM_SAVE + 4*16]
+ movdqa xmm11,[rsp + _XMM_SAVE + 5*16]
+ movdqa xmm12,[rsp + _XMM_SAVE + 6*16]
+%endif
+
+ add rsp, STACK_SIZE
+
+ pop r15
+ pop r14
+ pop r13
+ pop rbp
+%ifndef LINUX
+ pop rdi
+ pop rsi
+%endif
+ pop rbx
+
+ ret
+
+
+section .data
+align 64
+K256:
+ dd 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+ dd 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+ dd 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+ dd 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+ dd 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+ dd 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+ dd 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+ dd 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+ dd 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+ dd 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+ dd 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+ dd 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+ dd 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+ dd 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+ dd 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+ dd 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+
+PSHUFFLE_BYTE_FLIP_MASK: ddq 0x0c0d0e0f08090a0b0405060700010203
+
+; shuffle xBxA -> 00BA
+_SHUF_00BA: ddq 0xFFFFFFFFFFFFFFFF0b0a090803020100
+
+; shuffle xDxC -> DC00
+_SHUF_DC00: ddq 0x0b0a090803020100FFFFFFFFFFFFFFFF
Index: sys/crypto/sha2/sha256c.c
===================================================================
--- sys/crypto/sha2/sha256c.c
+++ sys/crypto/sha2/sha256c.c
@@ -32,6 +32,8 @@
#ifdef _KERNEL
#include <sys/systm.h>
+#include <sys/param.h>
+#include <sys/proc.h>
#else
#include <string.h>
#endif
@@ -194,7 +196,120 @@
state[i] += S[i];
}
-static unsigned char PAD[64] = {
+static void
+SHA256_TransformBlocks(const void *data, uint32_t state[8], uint64_t num_blks)
+{
+
+ while (num_blks-- > 0) {
+ SHA256_Transform(state, data);
+ data = (const char *)data + 64;
+ }
+}
+
+#if !defined(__amd64__)
+#define SHA256_TransformFunction(a,b,c) SHA256_TransformBlocks((a), (b), (c))
+#else /* defined(__amd64__) */
+#ifdef _KERNEL
+#include <sys/types.h>
+#include <machine/atomic.h>
+#include <machine/cpufunc.h>
+#include <machine/specialreg.h>
+#include <machine/md_var.h>
+#include <machine/fpu.h>
+#else
+#include <cpufeats.h>
+#endif
+
+#define SHA256_USEFUNCTIONS 0
+void sha256_sse4(const void *input_data, uint32_t digest[8], uint64_t num_blks);
+static void setup_transformfunct(void **cookieptr);
+
+static void (*SHA256_TransformFunction)(const void *, uint32_t digest[8], uint64_t) = SHA256_TransformBlocks;
+
+#ifdef _KERNEL
+static void (*SHA256_StartFunction)(void **ptr) = setup_transformfunct;
+static void (*SHA256_StopFunction)(void **ptr);
+
+static void
+SHA256_sse4_startfunction(void **cookieptr)
+{
+ struct thread *td;
+ struct fpu_kern_ctx **ctxptr;
+ int error;
+
+ if (is_fpu_kern_thread(0))
+ *cookieptr = NULL;
+ else {
+ ctxptr = (struct fpu_kern_ctx **)cookieptr;
+ td = curthread;
+ *ctxptr = fpu_kern_alloc_ctx(FPU_KERN_NORMAL);
+ error = fpu_kern_enter(td, *ctxptr, FPU_KERN_NORMAL);
+ KASSERT(error == 0, ("fpu_kern_enter returned non-zero"));
+ }
+}
+
+static void
+SHA256_sse4_stopfunction(void **cookieptr)
+{
+ struct thread *td;
+ struct fpu_kern_ctx **ctxptr;
+
+ ctxptr = (struct fpu_kern_ctx **)cookieptr;
+ td = curthread;
+
+ if (*ctxptr != NULL) {
+ fpu_kern_leave(td, *ctxptr);
+ fpu_kern_free_ctx(*ctxptr);
+ *ctxptr = NULL;
+ }
+}
+
+static void
+setup_transformfunct(void **cookieptr)
+{
+ u_int cf, cf2;
+
+ /* XXX - need to solve FPU issue */
+ cf = cpu_feature;
+ cf2 = cpu_feature2;
+
+ if ((cf & CPUID_SSE2) == CPUID_SSE2 &&
+ (cf2 & (CPUID2_SSE3|CPUID2_SSSE3)) == (CPUID2_SSE3|CPUID2_SSSE3)) {
+ SHA256_StopFunction = SHA256_sse4_stopfunction;
+ SHA256_TransformFunction = sha256_sse4;
+ atomic_store_rel_ptr((volatile u_long *)&SHA256_StartFunction,
+ (uintptr_t)SHA256_sse4_startfunction);
+ } else
+ {
+ SHA256_StopFunction = NULL;
+ SHA256_TransformFunction = SHA256_TransformBlocks;
+ atomic_store_rel_ptr((volatile u_long *)&SHA256_StartFunction,
+ (uintptr_t)NULL);
+ }
+
+ if (SHA256_StartFunction != NULL)
+ SHA256_StartFunction(cookieptr);
+}
+#else /* !_KERNEL */
+
+static void __attribute__((constructor))
+transfunc_init(void)
+{
+ u_int cf, cf2;
+
+ cf = cpu_feature;
+ cf2 = cpu_feature2;
+
+ if ((cf & CPUID_SSE2) == CPUID_SSE2 &&
+ (cf2 & (CPUID2_SSE3|CPUID2_SSSE3)) == (CPUID2_SSE3|CPUID2_SSSE3))
+ SHA256_TransformFunction = sha256_sse4;
+ else
+ SHA256_TransformFunction = SHA256_TransformBlocks;
+}
+#endif /* !_KERNEL */
+#endif /* Transform Block */
+
+static const unsigned char PAD[64] = {
0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -244,8 +359,11 @@
/* Add bytes into the hash */
void
-SHA256_Update(SHA256_CTX * ctx, const void *in, size_t len)
+SHA256_Update(SHA256_CTX *ctx, const void *in, size_t len)
{
+#if defined(SHA256_USEFUNCTIONS) && defined(_KERNEL)
+ void *cookie;
+#endif
uint64_t bitlen;
uint32_t r;
const unsigned char *src = in;
@@ -265,18 +383,26 @@
return;
}
+#if defined(SHA256_USEFUNCTIONS) && defined(_KERNEL)
+ if (SHA256_StartFunction != NULL)
+ SHA256_StartFunction(&cookie);
+#endif
+
/* Finish the current block */
memcpy(&ctx->buf[r], src, 64 - r);
- SHA256_Transform(ctx->state, ctx->buf);
+ SHA256_TransformFunction(ctx->buf, ctx->state, 1);
src += 64 - r;
len -= 64 - r;
/* Perform complete blocks */
- while (len >= 64) {
- SHA256_Transform(ctx->state, src);
- src += 64;
- len -= 64;
- }
+ SHA256_TransformFunction(src, ctx->state, len / 64);
+ src += len / 64 * 64;
+ len -= len / 64 * 64;
+
+#if defined(SHA256_USEFUNCTIONS) && defined(_KERNEL)
+ if (SHA256_StopFunction != NULL)
+ SHA256_StopFunction(&cookie);
+#endif
/* Copy left over data into buffer */
memcpy(ctx->buf, src, len);
@@ -299,3 +425,18 @@
/* Clear the context state */
memset((void *)ctx, 0, sizeof(*ctx));
}
+
+#ifdef WEAK_REFS
+/* When building libmd, provide weak references. Note: this is not
+ activated in the context of compiling these sources for internal
+ use in libcrypt.
+ */
+#undef SHA256_Init
+__weak_reference(_libmd_SHA256_Init, SHA256_Init);
+#undef SHA256_Update
+__weak_reference(_libmd_SHA256_Update, SHA256_Update);
+#undef SHA256_Final
+__weak_reference(_libmd_SHA256_Final, SHA256_Final);
+#undef SHA256_Transform
+__weak_reference(_libmd_SHA256_Transform, SHA256_Transform);
+#endif
