diff --git a/module/icp/asm-x86_64/blake3/blake3_avx2.S b/module/icp/asm-x86_64/blake3/blake3_avx2.S
index 8f9e766486f1..0ebec5c1095e 100644
--- a/module/icp/asm-x86_64/blake3/blake3_avx2.S
+++ b/module/icp/asm-x86_64/blake3/blake3_avx2.S
@@ -1,1829 +1,1828 @@
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Based on BLAKE3 v1.3.1, https://github.com/BLAKE3-team/BLAKE3
 * Copyright (c) 2019-2020 Samuel Neves
 * Copyright (c) 2022 Tino Reichardt
 */

#if defined(HAVE_AVX2)

#define _ASM
#include <sys/asm_linkage.h>

.intel_syntax noprefix
.text

ENTRY_ALIGN(zfs_blake3_hash_many_avx2, 64)
	ENDBR
	push r15
	push r14
	push r13
	push r12
	push rbx
	push rbp
	mov rbp, rsp
	sub rsp, 680
	and rsp, 0xFFFFFFFFFFFFFFC0
	neg r9d
	vmovd xmm0, r9d
	vpbroadcastd ymm0, xmm0
	vmovdqa ymmword ptr [rsp+0x280], ymm0
	vpand ymm1, ymm0, ymmword ptr [ADD0+rip]
	vpand ymm2, ymm0, ymmword ptr [ADD1+rip]
	vmovdqa ymmword ptr [rsp+0x220], ymm2
	vmovd xmm2, r8d
	vpbroadcastd ymm2, xmm2
	vpaddd ymm2, ymm2, ymm1
	vmovdqa ymmword ptr [rsp+0x240], ymm2
	vpxor ymm1, ymm1, ymmword ptr [CMP_MSB_MASK+rip]
	vpxor ymm2, ymm2, ymmword ptr [CMP_MSB_MASK+rip]
	vpcmpgtd ymm2, ymm1, ymm2
	shr r8, 32
	vmovd xmm3, r8d
	vpbroadcastd ymm3, xmm3
	vpsubd ymm3, ymm3, ymm2
	vmovdqa ymmword ptr [rsp+0x260], ymm3
	shl rdx, 6
	mov qword ptr [rsp+0x2A0], rdx
	cmp rsi, 8
	jc 3f
2:
	vpbroadcastd ymm0, dword ptr [rcx]
	vpbroadcastd ymm1, dword ptr [rcx+0x4]
	vpbroadcastd ymm2, dword ptr [rcx+0x8]
	vpbroadcastd ymm3, dword ptr [rcx+0xC]
	vpbroadcastd ymm4, dword ptr [rcx+0x10]
	vpbroadcastd ymm5, dword ptr [rcx+0x14]
	vpbroadcastd ymm6, dword ptr [rcx+0x18]
	vpbroadcastd ymm7, dword ptr [rcx+0x1C]
	mov r8, qword ptr [rdi]
	mov r9, qword ptr [rdi+0x8]
	mov r10, qword ptr [rdi+0x10]
	mov r11, qword ptr [rdi+0x18]
	mov r12, qword ptr [rdi+0x20]
	mov r13, qword ptr [rdi+0x28]
	mov r14, qword ptr [rdi+0x30]
	mov r15, qword ptr [rdi+0x38]
	movzx eax, byte ptr [rbp+0x38]
	movzx ebx, byte ptr [rbp+0x40]
	or eax, ebx
	xor edx, edx
.p2align 5
9:
	movzx ebx, byte ptr [rbp+0x48]
	or ebx, eax
	add rdx, 64
	cmp rdx, qword ptr [rsp+0x2A0]
	cmove eax, ebx
	mov dword ptr [rsp+0x200], eax
	vmovups xmm8, xmmword ptr [r8+rdx-0x40]
	vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x40], 0x01
	vmovups xmm9, xmmword ptr [r9+rdx-0x40]
	vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x40], 0x01
	vunpcklpd ymm12, ymm8, ymm9
	vunpckhpd ymm13, ymm8, ymm9
	vmovups xmm10, xmmword ptr [r10+rdx-0x40]
	vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x40], 0x01
	vmovups xmm11, xmmword ptr [r11+rdx-0x40]
	vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x40], 0x01
	vunpcklpd ymm14, ymm10, ymm11
	vunpckhpd ymm15, ymm10, ymm11
	vshufps ymm8, ymm12, ymm14, 136
	vmovaps ymmword ptr [rsp], ymm8
	vshufps ymm9, ymm12, ymm14, 221
	vmovaps ymmword ptr [rsp+0x20], ymm9
	vshufps ymm10, ymm13, ymm15, 136
	vmovaps ymmword ptr [rsp+0x40], ymm10
	vshufps ymm11,
ymm13, ymm15, 221 vmovaps ymmword ptr [rsp+0x60], ymm11 vmovups xmm8, xmmword ptr [r8+rdx-0x30] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x30], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x30] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x30], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x30] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x30], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x30] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x30], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm8, ymm12, ymm14, 136 vmovaps ymmword ptr [rsp+0x80], ymm8 vshufps ymm9, ymm12, ymm14, 221 vmovaps ymmword ptr [rsp+0xA0], ymm9 vshufps ymm10, ymm13, ymm15, 136 vmovaps ymmword ptr [rsp+0xC0], ymm10 vshufps ymm11, ymm13, ymm15, 221 vmovaps ymmword ptr [rsp+0xE0], ymm11 vmovups xmm8, xmmword ptr [r8+rdx-0x20] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x20], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x20] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x20], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x20] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x20], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x20] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x20], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm8, ymm12, ymm14, 136 vmovaps ymmword ptr [rsp+0x100], ymm8 vshufps ymm9, ymm12, ymm14, 221 vmovaps ymmword ptr [rsp+0x120], ymm9 vshufps ymm10, ymm13, ymm15, 136 vmovaps ymmword ptr [rsp+0x140], ymm10 vshufps ymm11, ymm13, ymm15, 221 vmovaps ymmword ptr [rsp+0x160], ymm11 vmovups xmm8, xmmword ptr [r8+rdx-0x10] vinsertf128 ymm8, ymm8, xmmword ptr [r12+rdx-0x10], 0x01 vmovups xmm9, xmmword ptr [r9+rdx-0x10] vinsertf128 ymm9, ymm9, xmmword ptr [r13+rdx-0x10], 0x01 vunpcklpd ymm12, ymm8, ymm9 vunpckhpd ymm13, ymm8, ymm9 vmovups xmm10, xmmword ptr [r10+rdx-0x10] vinsertf128 ymm10, ymm10, xmmword ptr [r14+rdx-0x10], 0x01 vmovups xmm11, xmmword ptr [r11+rdx-0x10] vinsertf128 ymm11, ymm11, xmmword ptr [r15+rdx-0x10], 0x01 vunpcklpd ymm14, ymm10, ymm11 vunpckhpd ymm15, ymm10, ymm11 vshufps ymm8, ymm12, ymm14, 136 vmovaps ymmword ptr [rsp+0x180], ymm8 vshufps ymm9, ymm12, ymm14, 221 vmovaps ymmword ptr [rsp+0x1A0], ymm9 vshufps ymm10, ymm13, ymm15, 136 vmovaps ymmword ptr [rsp+0x1C0], ymm10 vshufps ymm11, ymm13, ymm15, 221 vmovaps ymmword ptr [rsp+0x1E0], ymm11 vpbroadcastd ymm15, dword ptr [rsp+0x200] prefetcht0 [r8+rdx+0x80] prefetcht0 [r12+rdx+0x80] prefetcht0 [r9+rdx+0x80] prefetcht0 [r13+rdx+0x80] prefetcht0 [r10+rdx+0x80] prefetcht0 [r14+rdx+0x80] prefetcht0 [r11+rdx+0x80] prefetcht0 [r15+rdx+0x80] vpaddd ymm0, ymm0, ymmword ptr [rsp] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x80] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm0, ymmword ptr [rsp+0x240] vpxor ymm13, ymm1, ymmword ptr [rsp+0x260] vpxor ymm14, ymm2, ymmword ptr [BLAKE3_BLOCK_LEN+rip] vpxor ymm15, ymm3, ymm15 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [BLAKE3_IV_0+rip] vpaddd ymm9, ymm13, ymmword ptr [BLAKE3_IV_1+rip] vpaddd ymm10, ymm14, ymmword ptr [BLAKE3_IV_2+rip] vpaddd ymm11, ymm15, ymmword ptr [BLAKE3_IV_3+rip] vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, 
ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x20] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60] vpaddd ymm2, ymm2, ymmword ptr [rsp+0xA0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x100] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x180] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x120] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1A0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x40] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60] vpaddd ymm2, ymm2, ymmword ptr [rsp+0xE0] vpaddd ymm3, 
ymm3, ymmword ptr [rsp+0x80] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0xC0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140] vpaddd ymm2, ymm2, ymmword ptr [rsp] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x20] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x120] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x160] vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1C0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, 
ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x60] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1A0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x80] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x40] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0xC0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x160] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, 
ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0xA0] vpaddd ymm1, ymm1, ymmword ptr [rsp] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1E0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x140] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1C0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0xE0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x60] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x80] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160] vpaddd ymm2, ymm2, ymmword ptr [rsp+0xA0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb 
ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x100] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x180] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x120] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x1E0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1C0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1A0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x140] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, 
ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0xE0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0] vpaddd ymm2, ymm2, ymmword ptr [rsp] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x40] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x60] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x20] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x120] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x160] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x100] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1E0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1C0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x180] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x20] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, 
ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1A0] vpaddd ymm1, ymm1, ymmword ptr [rsp] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x40] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x80] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x60] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x140] vpaddd ymm2, ymm2, ymmword ptr [rsp+0xC0] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x160] vpaddd ymm1, ymm1, ymmword ptr [rsp+0xA0] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x20] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x100] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], 
ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1E0] vpaddd ymm1, ymm1, ymmword ptr [rsp] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x120] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xC0] vpaddd ymm0, ymm0, ymm4 vpaddd ymm1, ymm1, ymm5 vpaddd ymm2, ymm2, ymm6 vpaddd ymm3, ymm3, ymm7 vpxor ymm12, ymm12, ymm0 vpxor ymm13, ymm13, ymm1 vpxor ymm14, ymm14, ymm2 vpxor ymm15, ymm15, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpshufb ymm15, ymm15, ymm8 vpaddd ymm8, ymm12, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm13 vpaddd ymm10, ymm10, ymm14 vpaddd ymm11, ymm11, ymm15 vpxor ymm4, ymm4, ymm8 vpxor ymm5, ymm5, ymm9 vpxor ymm6, ymm6, ymm10 vpxor ymm7, ymm7, ymm11 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x1C0] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x40] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x60] vpaddd ymm3, ymm3, ymmword ptr [rsp+0xE0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT16+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vmovdqa ymmword ptr [rsp+0x200], ymm8 vpsrld ymm8, ymm5, 12 vpslld ymm5, ymm5, 20 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 12 vpslld ymm6, ymm6, 20 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 12 vpslld ymm7, ymm7, 20 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 12 vpslld ymm4, ymm4, 20 vpor ymm4, ymm4, ymm8 vpaddd ymm0, ymm0, ymmword ptr [rsp+0x140] vpaddd ymm1, ymm1, ymmword ptr [rsp+0x180] vpaddd ymm2, ymm2, ymmword ptr [rsp+0x80] vpaddd ymm3, ymm3, ymmword ptr [rsp+0x1A0] vpaddd ymm0, ymm0, ymm5 vpaddd ymm1, ymm1, ymm6 vpaddd ymm2, ymm2, ymm7 vpaddd ymm3, ymm3, ymm4 vpxor ymm15, ymm15, ymm0 vpxor ymm12, ymm12, ymm1 vpxor ymm13, ymm13, ymm2 vpxor ymm14, ymm14, ymm3 vbroadcasti128 ymm8, xmmword ptr [ROT8+rip] vpshufb ymm15, ymm15, ymm8 vpshufb ymm12, ymm12, ymm8 vpshufb ymm13, ymm13, ymm8 vpshufb ymm14, ymm14, ymm8 vpaddd ymm10, ymm10, ymm15 vpaddd ymm11, ymm11, ymm12 vpaddd ymm8, ymm13, ymmword ptr [rsp+0x200] vpaddd ymm9, ymm9, ymm14 vpxor ymm5, ymm5, ymm10 vpxor ymm6, ymm6, ymm11 vpxor ymm7, ymm7, ymm8 vpxor ymm4, ymm4, ymm9 vpxor ymm0, ymm0, ymm8 vpxor ymm1, ymm1, ymm9 vpxor ymm2, ymm2, ymm10 vpxor ymm3, ymm3, ymm11 vpsrld ymm8, ymm5, 7 vpslld ymm5, ymm5, 25 vpor ymm5, ymm5, ymm8 vpsrld ymm8, ymm6, 7 vpslld ymm6, ymm6, 25 vpor ymm6, ymm6, ymm8 vpsrld ymm8, ymm7, 7 vpslld ymm7, ymm7, 25 vpor ymm7, ymm7, ymm8 vpsrld ymm8, ymm4, 7 vpslld ymm4, ymm4, 25 vpor ymm4, ymm4, ymm8 vpxor ymm4, ymm4, ymm12 vpxor ymm5, ymm5, ymm13 vpxor ymm6, ymm6, ymm14 vpxor ymm7, ymm7, ymm15 movzx eax, byte ptr [rbp+0x38] jne 
9b mov rbx, qword ptr [rbp+0x50] vunpcklps ymm8, ymm0, ymm1 vunpcklps ymm9, ymm2, ymm3 vunpckhps ymm10, ymm0, ymm1 vunpcklps ymm11, ymm4, ymm5 vunpcklps ymm0, ymm6, ymm7 vshufps ymm12, ymm8, ymm9, 78 vblendps ymm1, ymm8, ymm12, 0xCC vshufps ymm8, ymm11, ymm0, 78 vunpckhps ymm13, ymm2, ymm3 vblendps ymm2, ymm11, ymm8, 0xCC vblendps ymm3, ymm12, ymm9, 0xCC vperm2f128 ymm12, ymm1, ymm2, 0x20 vmovups ymmword ptr [rbx], ymm12 vunpckhps ymm14, ymm4, ymm5 vblendps ymm4, ymm8, ymm0, 0xCC vunpckhps ymm15, ymm6, ymm7 vperm2f128 ymm7, ymm3, ymm4, 0x20 vmovups ymmword ptr [rbx+0x20], ymm7 vshufps ymm5, ymm10, ymm13, 78 vblendps ymm6, ymm5, ymm13, 0xCC vshufps ymm13, ymm14, ymm15, 78 vblendps ymm10, ymm10, ymm5, 0xCC vblendps ymm14, ymm14, ymm13, 0xCC vperm2f128 ymm8, ymm10, ymm14, 0x20 vmovups ymmword ptr [rbx+0x40], ymm8 vblendps ymm15, ymm13, ymm15, 0xCC vperm2f128 ymm13, ymm6, ymm15, 0x20 vmovups ymmword ptr [rbx+0x60], ymm13 vperm2f128 ymm9, ymm1, ymm2, 0x31 vperm2f128 ymm11, ymm3, ymm4, 0x31 vmovups ymmword ptr [rbx+0x80], ymm9 vperm2f128 ymm14, ymm10, ymm14, 0x31 vperm2f128 ymm15, ymm6, ymm15, 0x31 vmovups ymmword ptr [rbx+0xA0], ymm11 vmovups ymmword ptr [rbx+0xC0], ymm14 vmovups ymmword ptr [rbx+0xE0], ymm15 vmovdqa ymm0, ymmword ptr [rsp+0x220] vpaddd ymm1, ymm0, ymmword ptr [rsp+0x240] vmovdqa ymmword ptr [rsp+0x240], ymm1 vpxor ymm0, ymm0, ymmword ptr [CMP_MSB_MASK+rip] vpxor ymm2, ymm1, ymmword ptr [CMP_MSB_MASK+rip] vpcmpgtd ymm2, ymm0, ymm2 vmovdqa ymm0, ymmword ptr [rsp+0x260] vpsubd ymm2, ymm0, ymm2 vmovdqa ymmword ptr [rsp+0x260], ymm2 add rdi, 64 add rbx, 256 mov qword ptr [rbp+0x50], rbx sub rsi, 8 cmp rsi, 8 jnc 2b test rsi, rsi jnz 3f 4: vzeroupper mov rsp, rbp pop rbp pop rbx pop r12 pop r13 pop r14 pop r15 RET .p2align 5 3: mov rbx, qword ptr [rbp+0x50] mov r15, qword ptr [rsp+0x2A0] movzx r13d, byte ptr [rbp+0x38] movzx r12d, byte ptr [rbp+0x48] test rsi, 0x4 je 3f vbroadcasti128 ymm0, xmmword ptr [rcx] vbroadcasti128 ymm1, xmmword ptr [rcx+0x10] vmovdqa ymm8, ymm0 vmovdqa ymm9, ymm1 vbroadcasti128 ymm12, xmmword ptr [rsp+0x240] vbroadcasti128 ymm13, xmmword ptr [rsp+0x260] vpunpckldq ymm14, ymm12, ymm13 vpunpckhdq ymm15, ymm12, ymm13 vpermq ymm14, ymm14, 0x50 vpermq ymm15, ymm15, 0x50 vbroadcasti128 ymm12, xmmword ptr [BLAKE3_BLOCK_LEN+rip] vpblendd ymm14, ymm14, ymm12, 0x44 vpblendd ymm15, ymm15, ymm12, 0x44 vmovdqa ymmword ptr [rsp], ymm14 vmovdqa ymmword ptr [rsp+0x20], ymm15 mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] mov r10, qword ptr [rdi+0x10] mov r11, qword ptr [rdi+0x18] movzx eax, byte ptr [rbp+0x40] or eax, r13d xor edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d mov dword ptr [rsp+0x200], eax vmovups ymm2, ymmword ptr [r8+rdx-0x40] vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-0x40], 0x01 vmovups ymm3, ymmword ptr [r8+rdx-0x30] vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-0x30], 0x01 vshufps ymm4, ymm2, ymm3, 136 vshufps ymm5, ymm2, ymm3, 221 vmovups ymm2, ymmword ptr [r8+rdx-0x20] vinsertf128 ymm2, ymm2, xmmword ptr [r9+rdx-0x20], 0x01 vmovups ymm3, ymmword ptr [r8+rdx-0x10] vinsertf128 ymm3, ymm3, xmmword ptr [r9+rdx-0x10], 0x01 vshufps ymm6, ymm2, ymm3, 136 vshufps ymm7, ymm2, ymm3, 221 vpshufd ymm6, ymm6, 0x93 vpshufd ymm7, ymm7, 0x93 vmovups ymm10, ymmword ptr [r10+rdx-0x40] vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-0x40], 0x01 vmovups ymm11, ymmword ptr [r10+rdx-0x30] vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-0x30], 0x01 vshufps ymm12, ymm10, ymm11, 136 vshufps ymm13, ymm10, ymm11, 221 vmovups ymm10, 
ymmword ptr [r10+rdx-0x20] vinsertf128 ymm10, ymm10, xmmword ptr [r11+rdx-0x20], 0x01 vmovups ymm11, ymmword ptr [r10+rdx-0x10] vinsertf128 ymm11, ymm11, xmmword ptr [r11+rdx-0x10], 0x01 vshufps ymm14, ymm10, ymm11, 136 vshufps ymm15, ymm10, ymm11, 221 vpshufd ymm14, ymm14, 0x93 vpshufd ymm15, ymm15, 0x93 prefetcht0 [r8+rdx+0x80] prefetcht0 [r9+rdx+0x80] prefetcht0 [r10+rdx+0x80] prefetcht0 [r11+rdx+0x80] vpbroadcastd ymm2, dword ptr [rsp+0x200] vmovdqa ymm3, ymmword ptr [rsp] vmovdqa ymm11, ymmword ptr [rsp+0x20] vpblendd ymm3, ymm3, ymm2, 0x88 vpblendd ymm11, ymm11, ymm2, 0x88 vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip] vmovdqa ymm10, ymm2 mov al, 7 9: vpaddd ymm0, ymm0, ymm4 vpaddd ymm8, ymm8, ymm12 vmovdqa ymmword ptr [rsp+0x40], ymm4 nop vmovdqa ymmword ptr [rsp+0x60], ymm12 nop vpaddd ymm0, ymm0, ymm1 vpaddd ymm8, ymm8, ymm9 vpxor ymm3, ymm3, ymm0 vpxor ymm11, ymm11, ymm8 vbroadcasti128 ymm4, xmmword ptr [ROT16+rip] vpshufb ymm3, ymm3, ymm4 vpshufb ymm11, ymm11, ymm4 vpaddd ymm2, ymm2, ymm3 vpaddd ymm10, ymm10, ymm11 vpxor ymm1, ymm1, ymm2 vpxor ymm9, ymm9, ymm10 vpsrld ymm4, ymm1, 12 vpslld ymm1, ymm1, 20 vpor ymm1, ymm1, ymm4 vpsrld ymm4, ymm9, 12 vpslld ymm9, ymm9, 20 vpor ymm9, ymm9, ymm4 vpaddd ymm0, ymm0, ymm5 vpaddd ymm8, ymm8, ymm13 vpaddd ymm0, ymm0, ymm1 vpaddd ymm8, ymm8, ymm9 vmovdqa ymmword ptr [rsp+0x80], ymm5 vmovdqa ymmword ptr [rsp+0xA0], ymm13 vpxor ymm3, ymm3, ymm0 vpxor ymm11, ymm11, ymm8 vbroadcasti128 ymm4, xmmword ptr [ROT8+rip] vpshufb ymm3, ymm3, ymm4 vpshufb ymm11, ymm11, ymm4 vpaddd ymm2, ymm2, ymm3 vpaddd ymm10, ymm10, ymm11 vpxor ymm1, ymm1, ymm2 vpxor ymm9, ymm9, ymm10 vpsrld ymm4, ymm1, 7 vpslld ymm1, ymm1, 25 vpor ymm1, ymm1, ymm4 vpsrld ymm4, ymm9, 7 vpslld ymm9, ymm9, 25 vpor ymm9, ymm9, ymm4 vpshufd ymm0, ymm0, 0x93 vpshufd ymm8, ymm8, 0x93 vpshufd ymm3, ymm3, 0x4E vpshufd ymm11, ymm11, 0x4E vpshufd ymm2, ymm2, 0x39 vpshufd ymm10, ymm10, 0x39 vpaddd ymm0, ymm0, ymm6 vpaddd ymm8, ymm8, ymm14 vpaddd ymm0, ymm0, ymm1 vpaddd ymm8, ymm8, ymm9 vpxor ymm3, ymm3, ymm0 vpxor ymm11, ymm11, ymm8 vbroadcasti128 ymm4, xmmword ptr [ROT16+rip] vpshufb ymm3, ymm3, ymm4 vpshufb ymm11, ymm11, ymm4 vpaddd ymm2, ymm2, ymm3 vpaddd ymm10, ymm10, ymm11 vpxor ymm1, ymm1, ymm2 vpxor ymm9, ymm9, ymm10 vpsrld ymm4, ymm1, 12 vpslld ymm1, ymm1, 20 vpor ymm1, ymm1, ymm4 vpsrld ymm4, ymm9, 12 vpslld ymm9, ymm9, 20 vpor ymm9, ymm9, ymm4 vpaddd ymm0, ymm0, ymm7 vpaddd ymm8, ymm8, ymm15 vpaddd ymm0, ymm0, ymm1 vpaddd ymm8, ymm8, ymm9 vpxor ymm3, ymm3, ymm0 vpxor ymm11, ymm11, ymm8 vbroadcasti128 ymm4, xmmword ptr [ROT8+rip] vpshufb ymm3, ymm3, ymm4 vpshufb ymm11, ymm11, ymm4 vpaddd ymm2, ymm2, ymm3 vpaddd ymm10, ymm10, ymm11 vpxor ymm1, ymm1, ymm2 vpxor ymm9, ymm9, ymm10 vpsrld ymm4, ymm1, 7 vpslld ymm1, ymm1, 25 vpor ymm1, ymm1, ymm4 vpsrld ymm4, ymm9, 7 vpslld ymm9, ymm9, 25 vpor ymm9, ymm9, ymm4 vpshufd ymm0, ymm0, 0x39 vpshufd ymm8, ymm8, 0x39 vpshufd ymm3, ymm3, 0x4E vpshufd ymm11, ymm11, 0x4E vpshufd ymm2, ymm2, 0x93 vpshufd ymm10, ymm10, 0x93 dec al je 9f vmovdqa ymm4, ymmword ptr [rsp+0x40] vmovdqa ymm5, ymmword ptr [rsp+0x80] vshufps ymm12, ymm4, ymm5, 214 vpshufd ymm13, ymm4, 0x0F vpshufd ymm4, ymm12, 0x39 vshufps ymm12, ymm6, ymm7, 250 vpblendd ymm13, ymm13, ymm12, 0xAA vpunpcklqdq ymm12, ymm7, ymm5 vpblendd ymm12, ymm12, ymm6, 0x88 vpshufd ymm12, ymm12, 0x78 vpunpckhdq ymm5, ymm5, ymm7 vpunpckldq ymm6, ymm6, ymm5 vpshufd ymm7, ymm6, 0x1E vmovdqa ymmword ptr [rsp+0x40], ymm13 vmovdqa ymmword ptr [rsp+0x80], ymm12 vmovdqa ymm12, ymmword ptr [rsp+0x60] vmovdqa ymm13, 
ymmword ptr [rsp+0xA0] vshufps ymm5, ymm12, ymm13, 214 vpshufd ymm6, ymm12, 0x0F vpshufd ymm12, ymm5, 0x39 vshufps ymm5, ymm14, ymm15, 250 vpblendd ymm6, ymm6, ymm5, 0xAA vpunpcklqdq ymm5, ymm15, ymm13 vpblendd ymm5, ymm5, ymm14, 0x88 vpshufd ymm5, ymm5, 0x78 vpunpckhdq ymm13, ymm13, ymm15 vpunpckldq ymm14, ymm14, ymm13 vpshufd ymm15, ymm14, 0x1E vmovdqa ymm13, ymm6 vmovdqa ymm14, ymm5 vmovdqa ymm5, ymmword ptr [rsp+0x40] vmovdqa ymm6, ymmword ptr [rsp+0x80] jmp 9b 9: vpxor ymm0, ymm0, ymm2 vpxor ymm1, ymm1, ymm3 vpxor ymm8, ymm8, ymm10 vpxor ymm9, ymm9, ymm11 mov eax, r13d cmp rdx, r15 jne 2b vmovdqu xmmword ptr [rbx], xmm0 vmovdqu xmmword ptr [rbx+0x10], xmm1 vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01 vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01 vmovdqu xmmword ptr [rbx+0x40], xmm8 vmovdqu xmmword ptr [rbx+0x50], xmm9 vextracti128 xmmword ptr [rbx+0x60], ymm8, 0x01 vextracti128 xmmword ptr [rbx+0x70], ymm9, 0x01 vmovaps xmm8, xmmword ptr [rsp+0x280] vmovaps xmm0, xmmword ptr [rsp+0x240] vmovaps xmm1, xmmword ptr [rsp+0x250] vmovaps xmm2, xmmword ptr [rsp+0x260] vmovaps xmm3, xmmword ptr [rsp+0x270] vblendvps xmm0, xmm0, xmm1, xmm8 vblendvps xmm2, xmm2, xmm3, xmm8 vmovaps xmmword ptr [rsp+0x240], xmm0 vmovaps xmmword ptr [rsp+0x260], xmm2 add rbx, 128 add rdi, 32 sub rsi, 4 3: test rsi, 0x2 je 3f vbroadcasti128 ymm0, xmmword ptr [rcx] vbroadcasti128 ymm1, xmmword ptr [rcx+0x10] vmovd xmm13, dword ptr [rsp+0x240] vpinsrd xmm13, xmm13, dword ptr [rsp+0x260], 1 vpinsrd xmm13, xmm13, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 vmovd xmm14, dword ptr [rsp+0x244] vpinsrd xmm14, xmm14, dword ptr [rsp+0x264], 1 vpinsrd xmm14, xmm14, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 vinserti128 ymm13, ymm13, xmm14, 0x01 vbroadcasti128 ymm14, xmmword ptr [ROT16+rip] vbroadcasti128 ymm15, xmmword ptr [ROT8+rip] mov r8, qword ptr [rdi] mov r9, qword ptr [rdi+0x8] movzx eax, byte ptr [rbp+0x40] or eax, r13d xor edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d mov dword ptr [rsp+0x200], eax vbroadcasti128 ymm2, xmmword ptr [BLAKE3_IV+rip] vpbroadcastd ymm8, dword ptr [rsp+0x200] vpblendd ymm3, ymm13, ymm8, 0x88 vmovups ymm8, ymmword ptr [r8+rdx-0x40] vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x40], 0x01 vmovups ymm9, ymmword ptr [r8+rdx-0x30] vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x30], 0x01 vshufps ymm4, ymm8, ymm9, 136 vshufps ymm5, ymm8, ymm9, 221 vmovups ymm8, ymmword ptr [r8+rdx-0x20] vinsertf128 ymm8, ymm8, xmmword ptr [r9+rdx-0x20], 0x01 vmovups ymm9, ymmword ptr [r8+rdx-0x10] vinsertf128 ymm9, ymm9, xmmword ptr [r9+rdx-0x10], 0x01 vshufps ymm6, ymm8, ymm9, 136 vshufps ymm7, ymm8, ymm9, 221 vpshufd ymm6, ymm6, 0x93 vpshufd ymm7, ymm7, 0x93 mov al, 7 9: vpaddd ymm0, ymm0, ymm4 vpaddd ymm0, ymm0, ymm1 vpxor ymm3, ymm3, ymm0 vpshufb ymm3, ymm3, ymm14 vpaddd ymm2, ymm2, ymm3 vpxor ymm1, ymm1, ymm2 vpsrld ymm8, ymm1, 12 vpslld ymm1, ymm1, 20 vpor ymm1, ymm1, ymm8 vpaddd ymm0, ymm0, ymm5 vpaddd ymm0, ymm0, ymm1 vpxor ymm3, ymm3, ymm0 vpshufb ymm3, ymm3, ymm15 vpaddd ymm2, ymm2, ymm3 vpxor ymm1, ymm1, ymm2 vpsrld ymm8, ymm1, 7 vpslld ymm1, ymm1, 25 vpor ymm1, ymm1, ymm8 vpshufd ymm0, ymm0, 0x93 vpshufd ymm3, ymm3, 0x4E vpshufd ymm2, ymm2, 0x39 vpaddd ymm0, ymm0, ymm6 vpaddd ymm0, ymm0, ymm1 vpxor ymm3, ymm3, ymm0 vpshufb ymm3, ymm3, ymm14 vpaddd ymm2, ymm2, ymm3 vpxor ymm1, ymm1, ymm2 vpsrld ymm8, ymm1, 12 vpslld ymm1, ymm1, 20 vpor ymm1, ymm1, ymm8 vpaddd ymm0, ymm0, ymm7 vpaddd ymm0, ymm0, ymm1 vpxor ymm3, ymm3, ymm0 vpshufb ymm3, ymm3, ymm15 vpaddd ymm2, ymm2, 
ymm3 vpxor ymm1, ymm1, ymm2 vpsrld ymm8, ymm1, 7 vpslld ymm1, ymm1, 25 vpor ymm1, ymm1, ymm8 vpshufd ymm0, ymm0, 0x39 vpshufd ymm3, ymm3, 0x4E vpshufd ymm2, ymm2, 0x93 dec al jz 9f vshufps ymm8, ymm4, ymm5, 214 vpshufd ymm9, ymm4, 0x0F vpshufd ymm4, ymm8, 0x39 vshufps ymm8, ymm6, ymm7, 250 vpblendd ymm9, ymm9, ymm8, 0xAA vpunpcklqdq ymm8, ymm7, ymm5 vpblendd ymm8, ymm8, ymm6, 0x88 vpshufd ymm8, ymm8, 0x78 vpunpckhdq ymm5, ymm5, ymm7 vpunpckldq ymm6, ymm6, ymm5 vpshufd ymm7, ymm6, 0x1E vmovdqa ymm5, ymm9 vmovdqa ymm6, ymm8 jmp 9b 9: vpxor ymm0, ymm0, ymm2 vpxor ymm1, ymm1, ymm3 mov eax, r13d cmp rdx, r15 jne 2b vmovdqu xmmword ptr [rbx], xmm0 vmovdqu xmmword ptr [rbx+0x10], xmm1 vextracti128 xmmword ptr [rbx+0x20], ymm0, 0x01 vextracti128 xmmword ptr [rbx+0x30], ymm1, 0x01 vmovaps ymm8, ymmword ptr [rsp+0x280] vmovaps ymm0, ymmword ptr [rsp+0x240] vmovups ymm1, ymmword ptr [rsp+0x248] vmovaps ymm2, ymmword ptr [rsp+0x260] vmovups ymm3, ymmword ptr [rsp+0x268] vblendvps ymm0, ymm0, ymm1, ymm8 vblendvps ymm2, ymm2, ymm3, ymm8 vmovaps ymmword ptr [rsp+0x240], ymm0 vmovaps ymmword ptr [rsp+0x260], ymm2 add rbx, 64 add rdi, 16 sub rsi, 2 3: test rsi, 0x1 je 4b vmovdqu xmm0, xmmword ptr [rcx] vmovdqu xmm1, xmmword ptr [rcx+0x10] vmovd xmm3, dword ptr [rsp+0x240] vpinsrd xmm3, xmm3, dword ptr [rsp+0x260], 1 vpinsrd xmm13, xmm3, dword ptr [BLAKE3_BLOCK_LEN+rip], 2 vmovdqa xmm14, xmmword ptr [ROT16+rip] vmovdqa xmm15, xmmword ptr [ROT8+rip] mov r8, qword ptr [rdi] movzx eax, byte ptr [rbp+0x40] or eax, r13d xor edx, edx .p2align 5 2: mov r14d, eax or eax, r12d add rdx, 64 cmp rdx, r15 cmovne eax, r14d vmovdqa xmm2, xmmword ptr [BLAKE3_IV+rip] vmovdqa xmm3, xmm13 vpinsrd xmm3, xmm3, eax, 3 vmovups xmm8, xmmword ptr [r8+rdx-0x40] vmovups xmm9, xmmword ptr [r8+rdx-0x30] vshufps xmm4, xmm8, xmm9, 136 vshufps xmm5, xmm8, xmm9, 221 vmovups xmm8, xmmword ptr [r8+rdx-0x20] vmovups xmm9, xmmword ptr [r8+rdx-0x10] vshufps xmm6, xmm8, xmm9, 136 vshufps xmm7, xmm8, xmm9, 221 vpshufd xmm6, xmm6, 0x93 vpshufd xmm7, xmm7, 0x93 mov al, 7 9: vpaddd xmm0, xmm0, xmm4 vpaddd xmm0, xmm0, xmm1 vpxor xmm3, xmm3, xmm0 vpshufb xmm3, xmm3, xmm14 vpaddd xmm2, xmm2, xmm3 vpxor xmm1, xmm1, xmm2 vpsrld xmm8, xmm1, 12 vpslld xmm1, xmm1, 20 vpor xmm1, xmm1, xmm8 vpaddd xmm0, xmm0, xmm5 vpaddd xmm0, xmm0, xmm1 vpxor xmm3, xmm3, xmm0 vpshufb xmm3, xmm3, xmm15 vpaddd xmm2, xmm2, xmm3 vpxor xmm1, xmm1, xmm2 vpsrld xmm8, xmm1, 7 vpslld xmm1, xmm1, 25 vpor xmm1, xmm1, xmm8 vpshufd xmm0, xmm0, 0x93 vpshufd xmm3, xmm3, 0x4E vpshufd xmm2, xmm2, 0x39 vpaddd xmm0, xmm0, xmm6 vpaddd xmm0, xmm0, xmm1 vpxor xmm3, xmm3, xmm0 vpshufb xmm3, xmm3, xmm14 vpaddd xmm2, xmm2, xmm3 vpxor xmm1, xmm1, xmm2 vpsrld xmm8, xmm1, 12 vpslld xmm1, xmm1, 20 vpor xmm1, xmm1, xmm8 vpaddd xmm0, xmm0, xmm7 vpaddd xmm0, xmm0, xmm1 vpxor xmm3, xmm3, xmm0 vpshufb xmm3, xmm3, xmm15 vpaddd xmm2, xmm2, xmm3 vpxor xmm1, xmm1, xmm2 vpsrld xmm8, xmm1, 7 vpslld xmm1, xmm1, 25 vpor xmm1, xmm1, xmm8 vpshufd xmm0, xmm0, 0x39 vpshufd xmm3, xmm3, 0x4E vpshufd xmm2, xmm2, 0x93 dec al jz 9f vshufps xmm8, xmm4, xmm5, 214 vpshufd xmm9, xmm4, 0x0F vpshufd xmm4, xmm8, 0x39 vshufps xmm8, xmm6, xmm7, 250 vpblendd xmm9, xmm9, xmm8, 0xAA vpunpcklqdq xmm8, xmm7, xmm5 vpblendd xmm8, xmm8, xmm6, 0x88 vpshufd xmm8, xmm8, 0x78 vpunpckhdq xmm5, xmm5, xmm7 vpunpckldq xmm6, xmm6, xmm5 vpshufd xmm7, xmm6, 0x1E vmovdqa xmm5, xmm9 vmovdqa xmm6, xmm8 jmp 9b 9: vpxor xmm0, xmm0, xmm2 vpxor xmm1, xmm1, xmm3 mov eax, r13d cmp rdx, r15 jne 2b vmovdqu xmmword ptr [rbx], xmm0 vmovdqu xmmword ptr [rbx+0x10], 
xmm1
	jmp 4b
SET_SIZE(zfs_blake3_hash_many_avx2)

SECTION_STATIC
-.section .rodata
.p2align 6
ADD0:
	.long 0, 1, 2, 3, 4, 5, 6, 7
ADD1:
	.long 8, 8, 8, 8, 8, 8, 8, 8
BLAKE3_IV_0:
	.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
	.long 0x6A09E667, 0x6A09E667, 0x6A09E667, 0x6A09E667
BLAKE3_IV_1:
	.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
	.long 0xBB67AE85, 0xBB67AE85, 0xBB67AE85, 0xBB67AE85
BLAKE3_IV_2:
	.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
	.long 0x3C6EF372, 0x3C6EF372, 0x3C6EF372, 0x3C6EF372
BLAKE3_IV_3:
	.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
	.long 0xA54FF53A, 0xA54FF53A, 0xA54FF53A, 0xA54FF53A
BLAKE3_BLOCK_LEN:
	.long 0x00000040, 0x00000040, 0x00000040, 0x00000040
	.long 0x00000040, 0x00000040, 0x00000040, 0x00000040
ROT16:
	.byte 2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13
ROT8:
	.byte 1, 2, 3, 0, 5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12
CMP_MSB_MASK:
	.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
	.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
BLAKE3_IV:
	.long 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A

#endif /* HAVE_AVX2 */

#ifdef __ELF__
.section .note.GNU-stack,"",%progbits
#endif
diff --git a/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S b/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S
index 165492a0ed76..909b2147dff9 100644
--- a/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S
+++ b/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S
@@ -1,1259 +1,1264 @@
# Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
#
# AES-NI-CTR+GHASH stitch.
#
# February 2013
#
# OpenSSL GCM implementation is organized in such way that its
# performance is rather close to the sum of its streamed components,
# in the context parallelized AES-NI CTR and modulo-scheduled
# PCLMULQDQ-enabled GHASH. Unfortunately, as no stitch implementation
# was observed to perform significantly better than the sum of the
# components on contemporary CPUs, the effort was deemed impossible to
# justify. This module is based on combination of Intel submissions,
# [1] and [2], with MOVBE twist suggested by Ilya Albrekht and Max
# Locktyukhin of Intel Corp. who verified that it reduces shuffles
# pressure with notable relative improvement, achieving 1.0 cycle per
# byte processed with 128-bit key on Haswell processor, 0.74 - on
# Broadwell, 0.63 - on Skylake... [Mentioned results are raw profiled
# measurements for favourable packet size, one divisible by 96.
# Applications using the EVP interface will observe a few percent
# worse performance.]
#
# Knights Landing processes 1 byte in 1.25 cycles (measured with EVP).
#
# [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
# [2] http://www.intel.com/content/dam/www/public/us/en/documents/software-support/enabling-high-performance-gcm.pdf

# Generated once from
# https://github.com/openssl/openssl/blob/5ffc3324/crypto/modes/asm/aesni-gcm-x86_64.pl
# and modified for ICP.
Modification are kept at a bare minimum to ease later # upstream merges. #if defined(__x86_64__) && defined(HAVE_AVX) && \ defined(HAVE_AES) && defined(HAVE_PCLMULQDQ) #define _ASM #include /* Windows userland links with OpenSSL */ #if !defined (_WIN32) || defined (_KERNEL) +/* Apple needs _ */ +#if defined (__APPLE__) +#define gcm_avx_can_use_movbe _gcm_avx_can_use_movbe +#endif + .extern gcm_avx_can_use_movbe .text #ifdef HAVE_MOVBE .balign 32 FUNCTION(_aesni_ctr32_ghash_6x) .cfi_startproc ENDBR vmovdqu 32(%r11),%xmm2 subq $6,%rdx vpxor %xmm4,%xmm4,%xmm4 vmovdqu 0-128(%rcx),%xmm15 vpaddb %xmm2,%xmm1,%xmm10 vpaddb %xmm2,%xmm10,%xmm11 vpaddb %xmm2,%xmm11,%xmm12 vpaddb %xmm2,%xmm12,%xmm13 vpaddb %xmm2,%xmm13,%xmm14 vpxor %xmm15,%xmm1,%xmm9 vmovdqu %xmm4,16+8(%rsp) jmp .Loop6x .balign 32 .Loop6x: addl $100663296,%ebx jc .Lhandle_ctr32 vmovdqu 0-32(%r9),%xmm3 vpaddb %xmm2,%xmm14,%xmm1 vpxor %xmm15,%xmm10,%xmm10 vpxor %xmm15,%xmm11,%xmm11 .Lresume_ctr32: vmovdqu %xmm1,(%r8) vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5 vpxor %xmm15,%xmm12,%xmm12 vmovups 16-128(%rcx),%xmm2 vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6 xorq %r12,%r12 cmpq %r14,%r15 vaesenc %xmm2,%xmm9,%xmm9 vmovdqu 48+8(%rsp),%xmm0 vpxor %xmm15,%xmm13,%xmm13 vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1 vaesenc %xmm2,%xmm10,%xmm10 vpxor %xmm15,%xmm14,%xmm14 setnc %r12b vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vmovdqu 16-32(%r9),%xmm3 negq %r12 vaesenc %xmm2,%xmm12,%xmm12 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5 vpxor %xmm4,%xmm8,%xmm8 vaesenc %xmm2,%xmm13,%xmm13 vpxor %xmm5,%xmm1,%xmm4 andq $0x60,%r12 vmovups 32-128(%rcx),%xmm15 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1 vaesenc %xmm2,%xmm14,%xmm14 vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2 leaq (%r14,%r12,1),%r14 vaesenc %xmm15,%xmm9,%xmm9 vpxor 16+8(%rsp),%xmm8,%xmm8 vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3 vmovdqu 64+8(%rsp),%xmm0 vaesenc %xmm15,%xmm10,%xmm10 movbeq 88(%r14),%r13 vaesenc %xmm15,%xmm11,%xmm11 movbeq 80(%r14),%r12 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,32+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,40+8(%rsp) vmovdqu 48-32(%r9),%xmm5 vaesenc %xmm15,%xmm14,%xmm14 vmovups 48-128(%rcx),%xmm15 vpxor %xmm1,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm2,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2 vaesenc %xmm15,%xmm10,%xmm10 vpxor %xmm3,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm5,%xmm0,%xmm3 vaesenc %xmm15,%xmm11,%xmm11 vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5 vmovdqu 80+8(%rsp),%xmm0 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vpxor %xmm1,%xmm4,%xmm4 vmovdqu 64-32(%r9),%xmm1 vaesenc %xmm15,%xmm14,%xmm14 vmovups 64-128(%rcx),%xmm15 vpxor %xmm2,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm3,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3 vaesenc %xmm15,%xmm10,%xmm10 movbeq 72(%r14),%r13 vpxor %xmm5,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5 vaesenc %xmm15,%xmm11,%xmm11 movbeq 64(%r14),%r12 vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1 vmovdqu 96+8(%rsp),%xmm0 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,48+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,56+8(%rsp) vpxor %xmm2,%xmm4,%xmm4 vmovdqu 96-32(%r9),%xmm2 vaesenc %xmm15,%xmm14,%xmm14 vmovups 80-128(%rcx),%xmm15 vpxor %xmm3,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5 vaesenc %xmm15,%xmm10,%xmm10 movbeq 56(%r14),%r13 vpxor %xmm1,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1 vpxor 112+8(%rsp),%xmm8,%xmm8 vaesenc %xmm15,%xmm11,%xmm11 movbeq 48(%r14),%r12 vpclmulqdq 
$0x11,%xmm2,%xmm0,%xmm2 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,64+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,72+8(%rsp) vpxor %xmm3,%xmm4,%xmm4 vmovdqu 112-32(%r9),%xmm3 vaesenc %xmm15,%xmm14,%xmm14 vmovups 96-128(%rcx),%xmm15 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm1,%xmm6,%xmm6 vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1 vaesenc %xmm15,%xmm10,%xmm10 movbeq 40(%r14),%r13 vpxor %xmm2,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2 vaesenc %xmm15,%xmm11,%xmm11 movbeq 32(%r14),%r12 vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,80+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,88+8(%rsp) vpxor %xmm5,%xmm6,%xmm6 vaesenc %xmm15,%xmm14,%xmm14 vpxor %xmm1,%xmm6,%xmm6 vmovups 112-128(%rcx),%xmm15 vpslldq $8,%xmm6,%xmm5 vpxor %xmm2,%xmm4,%xmm4 vmovdqu 16(%r11),%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm8,%xmm7,%xmm7 vaesenc %xmm15,%xmm10,%xmm10 vpxor %xmm5,%xmm4,%xmm4 movbeq 24(%r14),%r13 vaesenc %xmm15,%xmm11,%xmm11 movbeq 16(%r14),%r12 vpalignr $8,%xmm4,%xmm4,%xmm0 vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 movq %r13,96+8(%rsp) vaesenc %xmm15,%xmm12,%xmm12 movq %r12,104+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 vmovups 128-128(%rcx),%xmm1 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vmovups 144-128(%rcx),%xmm15 vaesenc %xmm1,%xmm10,%xmm10 vpsrldq $8,%xmm6,%xmm6 vaesenc %xmm1,%xmm11,%xmm11 vpxor %xmm6,%xmm7,%xmm7 vaesenc %xmm1,%xmm12,%xmm12 vpxor %xmm0,%xmm4,%xmm4 movbeq 8(%r14),%r13 vaesenc %xmm1,%xmm13,%xmm13 movbeq 0(%r14),%r12 vaesenc %xmm1,%xmm14,%xmm14 vmovups 160-128(%rcx),%xmm1 cmpl $12,%ebp // ICP uses 10,12,14 not 9,11,13 for rounds. jb .Lenc_tail vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovups 176-128(%rcx),%xmm15 vaesenc %xmm1,%xmm14,%xmm14 vmovups 192-128(%rcx),%xmm1 cmpl $14,%ebp // ICP does not zero key schedule. 
jb .Lenc_tail vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovups 208-128(%rcx),%xmm15 vaesenc %xmm1,%xmm14,%xmm14 vmovups 224-128(%rcx),%xmm1 jmp .Lenc_tail .balign 32 .Lhandle_ctr32: vmovdqu (%r11),%xmm0 vpshufb %xmm0,%xmm1,%xmm6 vmovdqu 48(%r11),%xmm5 vpaddd 64(%r11),%xmm6,%xmm10 vpaddd %xmm5,%xmm6,%xmm11 vmovdqu 0-32(%r9),%xmm3 vpaddd %xmm5,%xmm10,%xmm12 vpshufb %xmm0,%xmm10,%xmm10 vpaddd %xmm5,%xmm11,%xmm13 vpshufb %xmm0,%xmm11,%xmm11 vpxor %xmm15,%xmm10,%xmm10 vpaddd %xmm5,%xmm12,%xmm14 vpshufb %xmm0,%xmm12,%xmm12 vpxor %xmm15,%xmm11,%xmm11 vpaddd %xmm5,%xmm13,%xmm1 vpshufb %xmm0,%xmm13,%xmm13 vpshufb %xmm0,%xmm14,%xmm14 vpshufb %xmm0,%xmm1,%xmm1 jmp .Lresume_ctr32 .balign 32 .Lenc_tail: vaesenc %xmm15,%xmm9,%xmm9 vmovdqu %xmm7,16+8(%rsp) vpalignr $8,%xmm4,%xmm4,%xmm8 vaesenc %xmm15,%xmm10,%xmm10 vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 vpxor 0(%rdi),%xmm1,%xmm2 vaesenc %xmm15,%xmm11,%xmm11 vpxor 16(%rdi),%xmm1,%xmm0 vaesenc %xmm15,%xmm12,%xmm12 vpxor 32(%rdi),%xmm1,%xmm5 vaesenc %xmm15,%xmm13,%xmm13 vpxor 48(%rdi),%xmm1,%xmm6 vaesenc %xmm15,%xmm14,%xmm14 vpxor 64(%rdi),%xmm1,%xmm7 vpxor 80(%rdi),%xmm1,%xmm3 vmovdqu (%r8),%xmm1 vaesenclast %xmm2,%xmm9,%xmm9 vmovdqu 32(%r11),%xmm2 vaesenclast %xmm0,%xmm10,%xmm10 vpaddb %xmm2,%xmm1,%xmm0 movq %r13,112+8(%rsp) leaq 96(%rdi),%rdi vaesenclast %xmm5,%xmm11,%xmm11 vpaddb %xmm2,%xmm0,%xmm5 movq %r12,120+8(%rsp) leaq 96(%rsi),%rsi vmovdqu 0-128(%rcx),%xmm15 vaesenclast %xmm6,%xmm12,%xmm12 vpaddb %xmm2,%xmm5,%xmm6 vaesenclast %xmm7,%xmm13,%xmm13 vpaddb %xmm2,%xmm6,%xmm7 vaesenclast %xmm3,%xmm14,%xmm14 vpaddb %xmm2,%xmm7,%xmm3 addq $0x60,%r10 subq $0x6,%rdx jc .L6x_done vmovups %xmm9,-96(%rsi) vpxor %xmm15,%xmm1,%xmm9 vmovups %xmm10,-80(%rsi) vmovdqa %xmm0,%xmm10 vmovups %xmm11,-64(%rsi) vmovdqa %xmm5,%xmm11 vmovups %xmm12,-48(%rsi) vmovdqa %xmm6,%xmm12 vmovups %xmm13,-32(%rsi) vmovdqa %xmm7,%xmm13 vmovups %xmm14,-16(%rsi) vmovdqa %xmm3,%xmm14 vmovdqu 32+8(%rsp),%xmm7 jmp .Loop6x .L6x_done: vpxor 16+8(%rsp),%xmm8,%xmm8 vpxor %xmm4,%xmm8,%xmm8 RET .cfi_endproc SET_SIZE(_aesni_ctr32_ghash_6x) #endif /* ifdef HAVE_MOVBE */ .balign 32 FUNCTION(_aesni_ctr32_ghash_no_movbe_6x) .cfi_startproc ENDBR vmovdqu 32(%r11),%xmm2 subq $6,%rdx vpxor %xmm4,%xmm4,%xmm4 vmovdqu 0-128(%rcx),%xmm15 vpaddb %xmm2,%xmm1,%xmm10 vpaddb %xmm2,%xmm10,%xmm11 vpaddb %xmm2,%xmm11,%xmm12 vpaddb %xmm2,%xmm12,%xmm13 vpaddb %xmm2,%xmm13,%xmm14 vpxor %xmm15,%xmm1,%xmm9 vmovdqu %xmm4,16+8(%rsp) jmp .Loop6x_nmb .balign 32 .Loop6x_nmb: addl $100663296,%ebx jc .Lhandle_ctr32_nmb vmovdqu 0-32(%r9),%xmm3 vpaddb %xmm2,%xmm14,%xmm1 vpxor %xmm15,%xmm10,%xmm10 vpxor %xmm15,%xmm11,%xmm11 .Lresume_ctr32_nmb: vmovdqu %xmm1,(%r8) vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5 vpxor %xmm15,%xmm12,%xmm12 vmovups 16-128(%rcx),%xmm2 vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6 xorq %r12,%r12 cmpq %r14,%r15 vaesenc %xmm2,%xmm9,%xmm9 vmovdqu 48+8(%rsp),%xmm0 vpxor %xmm15,%xmm13,%xmm13 vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1 vaesenc %xmm2,%xmm10,%xmm10 vpxor %xmm15,%xmm14,%xmm14 setnc %r12b vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vmovdqu 16-32(%r9),%xmm3 negq %r12 vaesenc %xmm2,%xmm12,%xmm12 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5 vpxor %xmm4,%xmm8,%xmm8 vaesenc %xmm2,%xmm13,%xmm13 vpxor %xmm5,%xmm1,%xmm4 andq $0x60,%r12 vmovups 
32-128(%rcx),%xmm15 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1 vaesenc %xmm2,%xmm14,%xmm14 vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2 leaq (%r14,%r12,1),%r14 vaesenc %xmm15,%xmm9,%xmm9 vpxor 16+8(%rsp),%xmm8,%xmm8 vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3 vmovdqu 64+8(%rsp),%xmm0 vaesenc %xmm15,%xmm10,%xmm10 movq 88(%r14),%r13 bswapq %r13 vaesenc %xmm15,%xmm11,%xmm11 movq 80(%r14),%r12 bswapq %r12 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,32+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,40+8(%rsp) vmovdqu 48-32(%r9),%xmm5 vaesenc %xmm15,%xmm14,%xmm14 vmovups 48-128(%rcx),%xmm15 vpxor %xmm1,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm2,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2 vaesenc %xmm15,%xmm10,%xmm10 vpxor %xmm3,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm5,%xmm0,%xmm3 vaesenc %xmm15,%xmm11,%xmm11 vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5 vmovdqu 80+8(%rsp),%xmm0 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vpxor %xmm1,%xmm4,%xmm4 vmovdqu 64-32(%r9),%xmm1 vaesenc %xmm15,%xmm14,%xmm14 vmovups 64-128(%rcx),%xmm15 vpxor %xmm2,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm3,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3 vaesenc %xmm15,%xmm10,%xmm10 movq 72(%r14),%r13 bswapq %r13 vpxor %xmm5,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5 vaesenc %xmm15,%xmm11,%xmm11 movq 64(%r14),%r12 bswapq %r12 vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1 vmovdqu 96+8(%rsp),%xmm0 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,48+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,56+8(%rsp) vpxor %xmm2,%xmm4,%xmm4 vmovdqu 96-32(%r9),%xmm2 vaesenc %xmm15,%xmm14,%xmm14 vmovups 80-128(%rcx),%xmm15 vpxor %xmm3,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5 vaesenc %xmm15,%xmm10,%xmm10 movq 56(%r14),%r13 bswapq %r13 vpxor %xmm1,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1 vpxor 112+8(%rsp),%xmm8,%xmm8 vaesenc %xmm15,%xmm11,%xmm11 movq 48(%r14),%r12 bswapq %r12 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm2 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,64+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,72+8(%rsp) vpxor %xmm3,%xmm4,%xmm4 vmovdqu 112-32(%r9),%xmm3 vaesenc %xmm15,%xmm14,%xmm14 vmovups 96-128(%rcx),%xmm15 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm1,%xmm6,%xmm6 vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1 vaesenc %xmm15,%xmm10,%xmm10 movq 40(%r14),%r13 bswapq %r13 vpxor %xmm2,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2 vaesenc %xmm15,%xmm11,%xmm11 movq 32(%r14),%r12 bswapq %r12 vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,80+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,88+8(%rsp) vpxor %xmm5,%xmm6,%xmm6 vaesenc %xmm15,%xmm14,%xmm14 vpxor %xmm1,%xmm6,%xmm6 vmovups 112-128(%rcx),%xmm15 vpslldq $8,%xmm6,%xmm5 vpxor %xmm2,%xmm4,%xmm4 vmovdqu 16(%r11),%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm8,%xmm7,%xmm7 vaesenc %xmm15,%xmm10,%xmm10 vpxor %xmm5,%xmm4,%xmm4 movq 24(%r14),%r13 bswapq %r13 vaesenc %xmm15,%xmm11,%xmm11 movq 16(%r14),%r12 bswapq %r12 vpalignr $8,%xmm4,%xmm4,%xmm0 vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 movq %r13,96+8(%rsp) vaesenc %xmm15,%xmm12,%xmm12 movq %r12,104+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 vmovups 128-128(%rcx),%xmm1 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vmovups 144-128(%rcx),%xmm15 vaesenc %xmm1,%xmm10,%xmm10 vpsrldq $8,%xmm6,%xmm6 vaesenc %xmm1,%xmm11,%xmm11 vpxor %xmm6,%xmm7,%xmm7 vaesenc %xmm1,%xmm12,%xmm12 vpxor %xmm0,%xmm4,%xmm4 movq 8(%r14),%r13 bswapq %r13 vaesenc %xmm1,%xmm13,%xmm13 
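	// This no-movbe variant follows the same data flow as
	// _aesni_ctr32_ghash_6x above, but fetches the GHASH input with
	// movq+bswapq instead of movbeq so it also runs on CPUs without
	// the MOVBE instruction (selected via gcm_avx_can_use_movbe at the
	// call sites).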
movq 0(%r14),%r12 bswapq %r12 vaesenc %xmm1,%xmm14,%xmm14 vmovups 160-128(%rcx),%xmm1 cmpl $12,%ebp // ICP uses 10,12,14 not 9,11,13 for rounds. jb .Lenc_tail_nmb vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovups 176-128(%rcx),%xmm15 vaesenc %xmm1,%xmm14,%xmm14 vmovups 192-128(%rcx),%xmm1 cmpl $14,%ebp // ICP does not zero key schedule. jb .Lenc_tail_nmb vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovups 208-128(%rcx),%xmm15 vaesenc %xmm1,%xmm14,%xmm14 vmovups 224-128(%rcx),%xmm1 jmp .Lenc_tail_nmb .balign 32 .Lhandle_ctr32_nmb: vmovdqu (%r11),%xmm0 vpshufb %xmm0,%xmm1,%xmm6 vmovdqu 48(%r11),%xmm5 vpaddd 64(%r11),%xmm6,%xmm10 vpaddd %xmm5,%xmm6,%xmm11 vmovdqu 0-32(%r9),%xmm3 vpaddd %xmm5,%xmm10,%xmm12 vpshufb %xmm0,%xmm10,%xmm10 vpaddd %xmm5,%xmm11,%xmm13 vpshufb %xmm0,%xmm11,%xmm11 vpxor %xmm15,%xmm10,%xmm10 vpaddd %xmm5,%xmm12,%xmm14 vpshufb %xmm0,%xmm12,%xmm12 vpxor %xmm15,%xmm11,%xmm11 vpaddd %xmm5,%xmm13,%xmm1 vpshufb %xmm0,%xmm13,%xmm13 vpshufb %xmm0,%xmm14,%xmm14 vpshufb %xmm0,%xmm1,%xmm1 jmp .Lresume_ctr32_nmb .balign 32 .Lenc_tail_nmb: vaesenc %xmm15,%xmm9,%xmm9 vmovdqu %xmm7,16+8(%rsp) vpalignr $8,%xmm4,%xmm4,%xmm8 vaesenc %xmm15,%xmm10,%xmm10 vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 vpxor 0(%rdi),%xmm1,%xmm2 vaesenc %xmm15,%xmm11,%xmm11 vpxor 16(%rdi),%xmm1,%xmm0 vaesenc %xmm15,%xmm12,%xmm12 vpxor 32(%rdi),%xmm1,%xmm5 vaesenc %xmm15,%xmm13,%xmm13 vpxor 48(%rdi),%xmm1,%xmm6 vaesenc %xmm15,%xmm14,%xmm14 vpxor 64(%rdi),%xmm1,%xmm7 vpxor 80(%rdi),%xmm1,%xmm3 vmovdqu (%r8),%xmm1 vaesenclast %xmm2,%xmm9,%xmm9 vmovdqu 32(%r11),%xmm2 vaesenclast %xmm0,%xmm10,%xmm10 vpaddb %xmm2,%xmm1,%xmm0 movq %r13,112+8(%rsp) leaq 96(%rdi),%rdi vaesenclast %xmm5,%xmm11,%xmm11 vpaddb %xmm2,%xmm0,%xmm5 movq %r12,120+8(%rsp) leaq 96(%rsi),%rsi vmovdqu 0-128(%rcx),%xmm15 vaesenclast %xmm6,%xmm12,%xmm12 vpaddb %xmm2,%xmm5,%xmm6 vaesenclast %xmm7,%xmm13,%xmm13 vpaddb %xmm2,%xmm6,%xmm7 vaesenclast %xmm3,%xmm14,%xmm14 vpaddb %xmm2,%xmm7,%xmm3 addq $0x60,%r10 subq $0x6,%rdx jc .L6x_done_nmb vmovups %xmm9,-96(%rsi) vpxor %xmm15,%xmm1,%xmm9 vmovups %xmm10,-80(%rsi) vmovdqa %xmm0,%xmm10 vmovups %xmm11,-64(%rsi) vmovdqa %xmm5,%xmm11 vmovups %xmm12,-48(%rsi) vmovdqa %xmm6,%xmm12 vmovups %xmm13,-32(%rsi) vmovdqa %xmm7,%xmm13 vmovups %xmm14,-16(%rsi) vmovdqa %xmm3,%xmm14 vmovdqu 32+8(%rsp),%xmm7 jmp .Loop6x_nmb .L6x_done_nmb: vpxor 16+8(%rsp),%xmm8,%xmm8 vpxor %xmm4,%xmm8,%xmm8 RET .cfi_endproc SET_SIZE(_aesni_ctr32_ghash_no_movbe_6x) ENTRY_ALIGN(aesni_gcm_decrypt, 32) .cfi_startproc ENDBR xorq %r10,%r10 cmpq $0x60,%rdx jb .Lgcm_dec_abort leaq (%rsp),%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 pushq %r9 .cfi_offset %r9,-64 vzeroupper vmovdqu (%r8),%xmm1 addq $-128,%rsp movl 12(%r8),%ebx leaq .Lbswap_mask(%rip),%r11 leaq -128(%rcx),%r14 movq $0xf80,%r15 vmovdqu (%r9),%xmm8 andq $-128,%rsp vmovdqu (%r11),%xmm0 leaq 
128(%rcx),%rcx movq 32(%r9),%r9 leaq 32(%r9),%r9 movl 504-128(%rcx),%ebp // ICP has a larger offset for rounds. vpshufb %xmm0,%xmm8,%xmm8 andq %r15,%r14 andq %rsp,%r15 subq %r14,%r15 jc .Ldec_no_key_aliasing cmpq $768,%r15 jnc .Ldec_no_key_aliasing subq %r15,%rsp .Ldec_no_key_aliasing: vmovdqu 80(%rdi),%xmm7 leaq (%rdi),%r14 vmovdqu 64(%rdi),%xmm4 leaq -192(%rdi,%rdx,1),%r15 vmovdqu 48(%rdi),%xmm5 shrq $4,%rdx xorq %r10,%r10 vmovdqu 32(%rdi),%xmm6 vpshufb %xmm0,%xmm7,%xmm7 vmovdqu 16(%rdi),%xmm2 vpshufb %xmm0,%xmm4,%xmm4 vmovdqu (%rdi),%xmm3 vpshufb %xmm0,%xmm5,%xmm5 vmovdqu %xmm4,48(%rsp) vpshufb %xmm0,%xmm6,%xmm6 vmovdqu %xmm5,64(%rsp) vpshufb %xmm0,%xmm2,%xmm2 vmovdqu %xmm6,80(%rsp) vpshufb %xmm0,%xmm3,%xmm3 vmovdqu %xmm2,96(%rsp) vmovdqu %xmm3,112(%rsp) #ifdef HAVE_MOVBE #ifdef _KERNEL testl $1,gcm_avx_can_use_movbe(%rip) #else testl $1,gcm_avx_can_use_movbe@GOTPCREL(%rip) #endif jz 1f call _aesni_ctr32_ghash_6x jmp 2f 1: #endif call _aesni_ctr32_ghash_no_movbe_6x 2: vmovups %xmm9,-96(%rsi) vmovups %xmm10,-80(%rsi) vmovups %xmm11,-64(%rsi) vmovups %xmm12,-48(%rsi) vmovups %xmm13,-32(%rsi) vmovups %xmm14,-16(%rsi) vpshufb (%r11),%xmm8,%xmm8 movq -56(%rax),%r9 .cfi_restore %r9 vmovdqu %xmm8,(%r9) vzeroupper movq -48(%rax),%r15 .cfi_restore %r15 movq -40(%rax),%r14 .cfi_restore %r14 movq -32(%rax),%r13 .cfi_restore %r13 movq -24(%rax),%r12 .cfi_restore %r12 movq -16(%rax),%rbp .cfi_restore %rbp movq -8(%rax),%rbx .cfi_restore %rbx leaq (%rax),%rsp .cfi_def_cfa_register %rsp .Lgcm_dec_abort: movq %r10,%rax RET .cfi_endproc SET_SIZE(aesni_gcm_decrypt) .balign 32 FUNCTION(_aesni_ctr32_6x) .cfi_startproc ENDBR vmovdqu 0-128(%rcx),%xmm4 vmovdqu 32(%r11),%xmm2 leaq -2(%rbp),%r13 // ICP uses 10,12,14 not 9,11,13 for rounds. vmovups 16-128(%rcx),%xmm15 leaq 32-128(%rcx),%r12 vpxor %xmm4,%xmm1,%xmm9 addl $100663296,%ebx jc .Lhandle_ctr32_2 vpaddb %xmm2,%xmm1,%xmm10 vpaddb %xmm2,%xmm10,%xmm11 vpxor %xmm4,%xmm10,%xmm10 vpaddb %xmm2,%xmm11,%xmm12 vpxor %xmm4,%xmm11,%xmm11 vpaddb %xmm2,%xmm12,%xmm13 vpxor %xmm4,%xmm12,%xmm12 vpaddb %xmm2,%xmm13,%xmm14 vpxor %xmm4,%xmm13,%xmm13 vpaddb %xmm2,%xmm14,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp .Loop_ctr32 .balign 16 .Loop_ctr32: vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vmovups (%r12),%xmm15 leaq 16(%r12),%r12 decl %r13d jnz .Loop_ctr32 vmovdqu (%r12),%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor 0(%rdi),%xmm3,%xmm4 vaesenc %xmm15,%xmm10,%xmm10 vpxor 16(%rdi),%xmm3,%xmm5 vaesenc %xmm15,%xmm11,%xmm11 vpxor 32(%rdi),%xmm3,%xmm6 vaesenc %xmm15,%xmm12,%xmm12 vpxor 48(%rdi),%xmm3,%xmm8 vaesenc %xmm15,%xmm13,%xmm13 vpxor 64(%rdi),%xmm3,%xmm2 vaesenc %xmm15,%xmm14,%xmm14 vpxor 80(%rdi),%xmm3,%xmm3 leaq 96(%rdi),%rdi vaesenclast %xmm4,%xmm9,%xmm9 vaesenclast %xmm5,%xmm10,%xmm10 vaesenclast %xmm6,%xmm11,%xmm11 vaesenclast %xmm8,%xmm12,%xmm12 vaesenclast %xmm2,%xmm13,%xmm13 vaesenclast %xmm3,%xmm14,%xmm14 vmovups %xmm9,0(%rsi) vmovups %xmm10,16(%rsi) vmovups %xmm11,32(%rsi) vmovups %xmm12,48(%rsi) vmovups %xmm13,64(%rsi) vmovups %xmm14,80(%rsi) leaq 96(%rsi),%rsi RET .balign 32 .Lhandle_ctr32_2: vpshufb %xmm0,%xmm1,%xmm6 vmovdqu 48(%r11),%xmm5 vpaddd 64(%r11),%xmm6,%xmm10 vpaddd %xmm5,%xmm6,%xmm11 vpaddd %xmm5,%xmm10,%xmm12 vpshufb %xmm0,%xmm10,%xmm10 vpaddd %xmm5,%xmm11,%xmm13 vpshufb %xmm0,%xmm11,%xmm11 vpxor %xmm4,%xmm10,%xmm10 vpaddd %xmm5,%xmm12,%xmm14 vpshufb %xmm0,%xmm12,%xmm12 vpxor %xmm4,%xmm11,%xmm11 vpaddd %xmm5,%xmm13,%xmm1 vpshufb 
%xmm0,%xmm13,%xmm13 vpxor %xmm4,%xmm12,%xmm12 vpshufb %xmm0,%xmm14,%xmm14 vpxor %xmm4,%xmm13,%xmm13 vpshufb %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp .Loop_ctr32 .cfi_endproc SET_SIZE(_aesni_ctr32_6x) ENTRY_ALIGN(aesni_gcm_encrypt, 32) .cfi_startproc ENDBR xorq %r10,%r10 cmpq $288,%rdx jb .Lgcm_enc_abort leaq (%rsp),%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 pushq %r9 .cfi_offset %r9,-64 vzeroupper vmovdqu (%r8),%xmm1 addq $-128,%rsp movl 12(%r8),%ebx leaq .Lbswap_mask(%rip),%r11 leaq -128(%rcx),%r14 movq $0xf80,%r15 leaq 128(%rcx),%rcx vmovdqu (%r11),%xmm0 andq $-128,%rsp movl 504-128(%rcx),%ebp // ICP has an larger offset for rounds. andq %r15,%r14 andq %rsp,%r15 subq %r14,%r15 jc .Lenc_no_key_aliasing cmpq $768,%r15 jnc .Lenc_no_key_aliasing subq %r15,%rsp .Lenc_no_key_aliasing: leaq (%rsi),%r14 leaq -192(%rsi,%rdx,1),%r15 shrq $4,%rdx call _aesni_ctr32_6x vpshufb %xmm0,%xmm9,%xmm8 vpshufb %xmm0,%xmm10,%xmm2 vmovdqu %xmm8,112(%rsp) vpshufb %xmm0,%xmm11,%xmm4 vmovdqu %xmm2,96(%rsp) vpshufb %xmm0,%xmm12,%xmm5 vmovdqu %xmm4,80(%rsp) vpshufb %xmm0,%xmm13,%xmm6 vmovdqu %xmm5,64(%rsp) vpshufb %xmm0,%xmm14,%xmm7 vmovdqu %xmm6,48(%rsp) call _aesni_ctr32_6x vmovdqu (%r9),%xmm8 movq 32(%r9),%r9 leaq 32(%r9),%r9 subq $12,%rdx movq $192,%r10 vpshufb %xmm0,%xmm8,%xmm8 #ifdef HAVE_MOVBE #ifdef _KERNEL testl $1,gcm_avx_can_use_movbe(%rip) #else testl $1,gcm_avx_can_use_movbe@GOTPCREL(%rip) #endif jz 1f call _aesni_ctr32_ghash_6x jmp 2f 1: #endif call _aesni_ctr32_ghash_no_movbe_6x 2: vmovdqu 32(%rsp),%xmm7 vmovdqu (%r11),%xmm0 vmovdqu 0-32(%r9),%xmm3 vpunpckhqdq %xmm7,%xmm7,%xmm1 vmovdqu 32-32(%r9),%xmm15 vmovups %xmm9,-96(%rsi) vpshufb %xmm0,%xmm9,%xmm9 vpxor %xmm7,%xmm1,%xmm1 vmovups %xmm10,-80(%rsi) vpshufb %xmm0,%xmm10,%xmm10 vmovups %xmm11,-64(%rsi) vpshufb %xmm0,%xmm11,%xmm11 vmovups %xmm12,-48(%rsi) vpshufb %xmm0,%xmm12,%xmm12 vmovups %xmm13,-32(%rsi) vpshufb %xmm0,%xmm13,%xmm13 vmovups %xmm14,-16(%rsi) vpshufb %xmm0,%xmm14,%xmm14 vmovdqu %xmm9,16(%rsp) vmovdqu 48(%rsp),%xmm6 vmovdqu 16-32(%r9),%xmm0 vpunpckhqdq %xmm6,%xmm6,%xmm2 vpclmulqdq $0x00,%xmm3,%xmm7,%xmm5 vpxor %xmm6,%xmm2,%xmm2 vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1 vmovdqu 64(%rsp),%xmm9 vpclmulqdq $0x00,%xmm0,%xmm6,%xmm4 vmovdqu 48-32(%r9),%xmm3 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq %xmm9,%xmm9,%xmm5 vpclmulqdq $0x11,%xmm0,%xmm6,%xmm6 vpxor %xmm9,%xmm5,%xmm5 vpxor %xmm7,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2 vmovdqu 80-32(%r9),%xmm15 vpxor %xmm1,%xmm2,%xmm2 vmovdqu 80(%rsp),%xmm1 vpclmulqdq $0x00,%xmm3,%xmm9,%xmm7 vmovdqu 64-32(%r9),%xmm0 vpxor %xmm4,%xmm7,%xmm7 vpunpckhqdq %xmm1,%xmm1,%xmm4 vpclmulqdq $0x11,%xmm3,%xmm9,%xmm9 vpxor %xmm1,%xmm4,%xmm4 vpxor %xmm6,%xmm9,%xmm9 vpclmulqdq $0x00,%xmm15,%xmm5,%xmm5 vpxor %xmm2,%xmm5,%xmm5 vmovdqu 96(%rsp),%xmm2 vpclmulqdq $0x00,%xmm0,%xmm1,%xmm6 vmovdqu 96-32(%r9),%xmm3 vpxor %xmm7,%xmm6,%xmm6 vpunpckhqdq %xmm2,%xmm2,%xmm7 vpclmulqdq $0x11,%xmm0,%xmm1,%xmm1 vpxor %xmm2,%xmm7,%xmm7 vpxor %xmm9,%xmm1,%xmm1 vpclmulqdq $0x10,%xmm15,%xmm4,%xmm4 vmovdqu 128-32(%r9),%xmm15 vpxor %xmm5,%xmm4,%xmm4 vpxor 112(%rsp),%xmm8,%xmm8 vpclmulqdq $0x00,%xmm3,%xmm2,%xmm5 vmovdqu 112-32(%r9),%xmm0 vpunpckhqdq %xmm8,%xmm8,%xmm9 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x11,%xmm3,%xmm2,%xmm2 vpxor %xmm8,%xmm9,%xmm9 vpxor %xmm1,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm15,%xmm7,%xmm7 vpxor 
%xmm4,%xmm7,%xmm4 vpclmulqdq $0x00,%xmm0,%xmm8,%xmm6 vmovdqu 0-32(%r9),%xmm3 vpunpckhqdq %xmm14,%xmm14,%xmm1 vpclmulqdq $0x11,%xmm0,%xmm8,%xmm8 vpxor %xmm14,%xmm1,%xmm1 vpxor %xmm5,%xmm6,%xmm5 vpclmulqdq $0x10,%xmm15,%xmm9,%xmm9 vmovdqu 32-32(%r9),%xmm15 vpxor %xmm2,%xmm8,%xmm7 vpxor %xmm4,%xmm9,%xmm6 vmovdqu 16-32(%r9),%xmm0 vpxor %xmm5,%xmm7,%xmm9 vpclmulqdq $0x00,%xmm3,%xmm14,%xmm4 vpxor %xmm9,%xmm6,%xmm6 vpunpckhqdq %xmm13,%xmm13,%xmm2 vpclmulqdq $0x11,%xmm3,%xmm14,%xmm14 vpxor %xmm13,%xmm2,%xmm2 vpslldq $8,%xmm6,%xmm9 vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1 vpxor %xmm9,%xmm5,%xmm8 vpsrldq $8,%xmm6,%xmm6 vpxor %xmm6,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm0,%xmm13,%xmm5 vmovdqu 48-32(%r9),%xmm3 vpxor %xmm4,%xmm5,%xmm5 vpunpckhqdq %xmm12,%xmm12,%xmm9 vpclmulqdq $0x11,%xmm0,%xmm13,%xmm13 vpxor %xmm12,%xmm9,%xmm9 vpxor %xmm14,%xmm13,%xmm13 vpalignr $8,%xmm8,%xmm8,%xmm14 vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2 vmovdqu 80-32(%r9),%xmm15 vpxor %xmm1,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm3,%xmm12,%xmm4 vmovdqu 64-32(%r9),%xmm0 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq %xmm11,%xmm11,%xmm1 vpclmulqdq $0x11,%xmm3,%xmm12,%xmm12 vpxor %xmm11,%xmm1,%xmm1 vpxor %xmm13,%xmm12,%xmm12 vxorps 16(%rsp),%xmm7,%xmm7 vpclmulqdq $0x00,%xmm15,%xmm9,%xmm9 vpxor %xmm2,%xmm9,%xmm9 vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8 vxorps %xmm14,%xmm8,%xmm8 vpclmulqdq $0x00,%xmm0,%xmm11,%xmm5 vmovdqu 96-32(%r9),%xmm3 vpxor %xmm4,%xmm5,%xmm5 vpunpckhqdq %xmm10,%xmm10,%xmm2 vpclmulqdq $0x11,%xmm0,%xmm11,%xmm11 vpxor %xmm10,%xmm2,%xmm2 vpalignr $8,%xmm8,%xmm8,%xmm14 vpxor %xmm12,%xmm11,%xmm11 vpclmulqdq $0x10,%xmm15,%xmm1,%xmm1 vmovdqu 128-32(%r9),%xmm15 vpxor %xmm9,%xmm1,%xmm1 vxorps %xmm7,%xmm14,%xmm14 vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8 vxorps %xmm14,%xmm8,%xmm8 vpclmulqdq $0x00,%xmm3,%xmm10,%xmm4 vmovdqu 112-32(%r9),%xmm0 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq %xmm8,%xmm8,%xmm9 vpclmulqdq $0x11,%xmm3,%xmm10,%xmm10 vpxor %xmm8,%xmm9,%xmm9 vpxor %xmm11,%xmm10,%xmm10 vpclmulqdq $0x00,%xmm15,%xmm2,%xmm2 vpxor %xmm1,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm0,%xmm8,%xmm5 vpclmulqdq $0x11,%xmm0,%xmm8,%xmm7 vpxor %xmm4,%xmm5,%xmm5 vpclmulqdq $0x10,%xmm15,%xmm9,%xmm6 vpxor %xmm10,%xmm7,%xmm7 vpxor %xmm2,%xmm6,%xmm6 vpxor %xmm5,%xmm7,%xmm4 vpxor %xmm4,%xmm6,%xmm6 vpslldq $8,%xmm6,%xmm1 vmovdqu 16(%r11),%xmm3 vpsrldq $8,%xmm6,%xmm6 vpxor %xmm1,%xmm5,%xmm8 vpxor %xmm6,%xmm7,%xmm7 vpalignr $8,%xmm8,%xmm8,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8 vpxor %xmm2,%xmm8,%xmm8 vpalignr $8,%xmm8,%xmm8,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8 vpxor %xmm7,%xmm2,%xmm2 vpxor %xmm2,%xmm8,%xmm8 vpshufb (%r11),%xmm8,%xmm8 movq -56(%rax),%r9 .cfi_restore %r9 vmovdqu %xmm8,(%r9) vzeroupper movq -48(%rax),%r15 .cfi_restore %r15 movq -40(%rax),%r14 .cfi_restore %r14 movq -32(%rax),%r13 .cfi_restore %r13 movq -24(%rax),%r12 .cfi_restore %r12 movq -16(%rax),%rbp .cfi_restore %rbp movq -8(%rax),%rbx .cfi_restore %rbx leaq (%rax),%rsp .cfi_def_cfa_register %rsp .Lgcm_enc_abort: movq %r10,%rax RET .cfi_endproc SET_SIZE(aesni_gcm_encrypt) #endif /* !_WIN32 || _KERNEL */ /* Some utility routines */ /* * clear all fpu registers * void clear_fpu_regs_avx(void); */ ENTRY_ALIGN(clear_fpu_regs_avx, 32) vzeroall RET SET_SIZE(clear_fpu_regs_avx) /* * void gcm_xor_avx(const uint8_t *src, uint8_t *dst); * * XORs one pair of unaligned 128-bit blocks from `src' and `dst' and * stores the result at `dst'. The XOR is performed using FPU registers, * so make sure FPU state is saved when running this in the kernel. 
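 *
 * A rough C equivalent of the operation (hypothetical sketch for
 * illustration only; the real routine keeps both blocks in %xmm
 * registers):
 *
 *	void gcm_xor_sketch(const uint8_t *src, uint8_t *dst)
 *	{
 *		for (int i = 0; i < 16; i++)
 *			dst[i] ^= src[i];
 *	}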
*/ ENTRY_ALIGN(gcm_xor_avx, 32) movdqu (%rdi), %xmm0 movdqu (%rsi), %xmm1 pxor %xmm1, %xmm0 movdqu %xmm0, (%rsi) RET SET_SIZE(gcm_xor_avx) /* * Toggle a boolean_t value atomically and return the new value. * boolean_t atomic_toggle_boolean_nv(volatile boolean_t *); */ ENTRY_ALIGN(atomic_toggle_boolean_nv, 32) xorl %eax, %eax lock xorl $1, (%rdi) jz 1f movl $1, %eax 1: RET SET_SIZE(atomic_toggle_boolean_nv) SECTION_STATIC .balign 64 .Lbswap_mask: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .Lpoly: .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 .Lone_msb: .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 .Ltwo_lsb: .byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 .Lone_lsb: .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 .byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .balign 64 /* Mark the stack non-executable. */ #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif #endif /* defined(__x86_64__) && defined(HAVE_AVX) && defined(HAVE_AES) ... */ diff --git a/module/icp/asm-x86_64/modes/gcm_pclmulqdq.S b/module/icp/asm-x86_64/modes/gcm_pclmulqdq.S index e40b3df32753..dec782fda33e 100644 --- a/module/icp/asm-x86_64/modes/gcm_pclmulqdq.S +++ b/module/icp/asm-x86_64/modes/gcm_pclmulqdq.S @@ -1,254 +1,254 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or https://opensource.org/licenses/CDDL-1.0. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2009 Intel Corporation * All Rights Reserved. */ /* * Copyright 2009 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ /* * Accelerated GHASH implementation with Intel PCLMULQDQ-NI * instructions. This file contains an accelerated * Galois Field Multiplication implementation. * * PCLMULQDQ is used to accelerate the most time-consuming part of GHASH, * carry-less multiplication. More information about PCLMULQDQ can be * found at: * http://software.intel.com/en-us/articles/ * carry-less-multiplication-and-its-usage-for-computing-the-gcm-mode/ * */ /* * ==================================================================== * OpenSolaris OS modifications * * This source originates as file galois_hash_asm.c from * Intel Corporation dated September 21, 2009. * * This OpenSolaris version has these major changes from the original source: * * 1. Added OpenSolaris ENTRY_NP/SET_SIZE macros from * /usr/include/sys/asm_linkage.h, lint(1B) guards, and a dummy C function * definition for lint. * * 2. Formatted code, added comments, and added #includes and #defines. * * 3. If bit CR0.TS is set, clear and set the TS bit, after and before * calling kpreempt_disable() and kpreempt_enable(). 
* If the TS bit is not set, Save and restore %xmm registers at the beginning * and end of function calls (%xmm* registers are not saved and restored by * during kernel thread preemption). * * 4. Removed code to perform hashing. This is already done with C macro * GHASH in gcm.c. For better performance, this removed code should be * reintegrated in the future to replace the C GHASH macro. * * 5. Added code to byte swap 16-byte input and output. * * 6. Folded in comments from the original C source with embedded assembly * (SB_w_shift_xor.c) * * 7. Renamed function and reordered parameters to match OpenSolaris: * Intel interface: * void galois_hash_asm(unsigned char *hk, unsigned char *s, * unsigned char *d, int length) * OpenSolaris OS interface: * void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res); * ==================================================================== */ #if defined(lint) || defined(__lint) /* lint */ #include void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res) { (void) x_in, (void) y, (void) res; } #elif defined(HAVE_PCLMULQDQ) /* guard by instruction set */ #define _ASM #include /* * Use this mask to byte-swap a 16-byte integer with the pshufb instruction */ // static uint8_t byte_swap16_mask[] = { // 15, 14, 13, 12, 11, 10, 9, 8, 7, 6 ,5, 4, 3, 2, 1, 0 }; -.section .rodata +SECTION_STATIC .balign XMM_ALIGN .Lbyte_swap16_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 /* * void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res); * * Perform a carry-less multiplication (that is, use XOR instead of the * multiply operator) on P1 and P2 and place the result in P3. * * Byte swap the input and the output. * * Note: x_in, y, and res all point to a block of 20-byte numbers * (an array of two 64-bit integers). * * Note2: For kernel code, caller is responsible for ensuring * kpreempt_disable() has been called. This is because %xmm registers are * not saved/restored. Clear and set the CR0.TS bit on entry and exit, * respectively, if TS is set on entry. Otherwise, if TS is not set, * save and restore %xmm registers on the stack. * * Note3: Original Intel definition: * void galois_hash_asm(unsigned char *hk, unsigned char *s, * unsigned char *d, int length) * * Note4: Register/parameter mapping: * Intel: * Parameter 1: %rcx (copied to %xmm0) hk or x_in * Parameter 2: %rdx (copied to %xmm1) s or y * Parameter 3: %rdi (result) d or res * OpenSolaris: * Parameter 1: %rdi (copied to %xmm0) x_in * Parameter 2: %rsi (copied to %xmm1) y * Parameter 3: %rdx (result) res */ ENTRY_NP(gcm_mul_pclmulqdq) // // Copy Parameters // movdqu (%rdi), %xmm0 // P1 movdqu (%rsi), %xmm1 // P2 // // Byte swap 16-byte input // lea .Lbyte_swap16_mask(%rip), %rax movups (%rax), %xmm10 pshufb %xmm10, %xmm0 pshufb %xmm10, %xmm1 // // Multiply with the hash key // movdqu %xmm0, %xmm3 pclmulqdq $0, %xmm1, %xmm3 // xmm3 holds a0*b0 movdqu %xmm0, %xmm4 pclmulqdq $16, %xmm1, %xmm4 // xmm4 holds a0*b1 movdqu %xmm0, %xmm5 pclmulqdq $1, %xmm1, %xmm5 // xmm5 holds a1*b0 movdqu %xmm0, %xmm6 pclmulqdq $17, %xmm1, %xmm6 // xmm6 holds a1*b1 pxor %xmm5, %xmm4 // xmm4 holds a0*b1 + a1*b0 movdqu %xmm4, %xmm5 // move the contents of xmm4 to xmm5 psrldq $8, %xmm4 // shift by xmm4 64 bits to the right pslldq $8, %xmm5 // shift by xmm5 64 bits to the left pxor %xmm5, %xmm3 pxor %xmm4, %xmm6 // Register pair holds the result // of the carry-less multiplication of // xmm0 by xmm1. 
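	//
	// In polynomial terms, with P1 = a1:a0 and P2 = b1:b0 split into
	// 64-bit halves, the four pclmulqdq products above assemble the
	// 256-bit carry-less result as
	//	a1*b1 * x^128  ^  (a0*b1 ^ a1*b0) * x^64  ^  a0*b0
	// where the middle term was split by the pslldq/psrldq pair and
	// folded into xmm3 (low 128 bits) and xmm6 (high 128 bits).
	//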
// We shift the result of the multiplication by one bit position // to the left to cope for the fact that the bits are reversed. movdqu %xmm3, %xmm7 movdqu %xmm6, %xmm8 pslld $1, %xmm3 pslld $1, %xmm6 psrld $31, %xmm7 psrld $31, %xmm8 movdqu %xmm7, %xmm9 pslldq $4, %xmm8 pslldq $4, %xmm7 psrldq $12, %xmm9 por %xmm7, %xmm3 por %xmm8, %xmm6 por %xmm9, %xmm6 // // First phase of the reduction // // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts // independently. movdqu %xmm3, %xmm7 movdqu %xmm3, %xmm8 movdqu %xmm3, %xmm9 pslld $31, %xmm7 // packed right shift shifting << 31 pslld $30, %xmm8 // packed right shift shifting << 30 pslld $25, %xmm9 // packed right shift shifting << 25 pxor %xmm8, %xmm7 // xor the shifted versions pxor %xmm9, %xmm7 movdqu %xmm7, %xmm8 pslldq $12, %xmm7 psrldq $4, %xmm8 pxor %xmm7, %xmm3 // first phase of the reduction complete // // Second phase of the reduction // // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these // shift operations. movdqu %xmm3, %xmm2 movdqu %xmm3, %xmm4 // packed left shifting >> 1 movdqu %xmm3, %xmm5 psrld $1, %xmm2 psrld $2, %xmm4 // packed left shifting >> 2 psrld $7, %xmm5 // packed left shifting >> 7 pxor %xmm4, %xmm2 // xor the shifted versions pxor %xmm5, %xmm2 pxor %xmm8, %xmm2 pxor %xmm2, %xmm3 pxor %xmm3, %xmm6 // the result is in xmm6 // // Byte swap 16-byte result // pshufb %xmm10, %xmm6 // %xmm10 has the swap mask // // Store the result // movdqu %xmm6, (%rdx) // P3 // // Return // RET SET_SIZE(gcm_mul_pclmulqdq) #endif /* lint || __lint */ #ifdef __ELF__ .section .note.GNU-stack,"",%progbits #endif diff --git a/module/icp/asm-x86_64/sha2/sha256_impl.S b/module/icp/asm-x86_64/sha2/sha256_impl.S index f3d701528459..f1fde51c1d69 100644 --- a/module/icp/asm-x86_64/sha2/sha256_impl.S +++ b/module/icp/asm-x86_64/sha2/sha256_impl.S @@ -1,2090 +1,2090 @@ /* * ==================================================================== * Written by Andy Polyakov for the OpenSSL * project. Rights for redistribution and usage in source and binary * forms are granted according to the OpenSSL license. * ==================================================================== * * sha256/512_block procedure for x86_64. * * 40% improvement over compiler-generated code on Opteron. On EM64T * sha256 was observed to run >80% faster and sha512 - >40%. No magical * tricks, just straight implementation... I really wonder why gcc * [being armed with inline assembler] fails to generate as fast code. * The only thing which is cool about this module is that it's very * same instruction sequence used for both SHA-256 and SHA-512. In * former case the instructions operate on 32-bit operands, while in * latter - on 64-bit ones. All I had to do is to get one flavor right, * the other one passed the test right away:-) * * sha256_block runs in ~1005 cycles on Opteron, which gives you * asymptotic performance of 64*1000/1005=63.7MBps times CPU clock * frequency in GHz. sha512_block runs in ~1275 cycles, which results * in 128*1000/1275=100MBps per GHz. Is there room for improvement? * Well, if you compare it to IA-64 implementation, which maintains * X[16] in register bank[!], tends to 4 instructions per CPU clock * cycle and runs in 1003 cycles, 1275 is very good result for 3-way * issue Opteron pipeline and X[16] maintained in memory. 
So that *if* * there is a way to improve it, *then* the only way would be to try to * offload X[16] updates to SSE unit, but that would require "deeper" * loop unroll, which in turn would naturally cause size blow-up, not * to mention increased complexity! And once again, only *if* it's * actually possible to noticeably improve overall ILP, instruction * level parallelism, on a given CPU implementation in this case. * * Special note on Intel EM64T. While Opteron CPU exhibits perfect * performance ratio of 1.5 between 64- and 32-bit flavors [see above], * [currently available] EM64T CPUs apparently are far from it. On the * contrary, 64-bit version, sha512_block, is ~30% *slower* than 32-bit * sha256_block:-( This is presumably because 64-bit shifts/rotates * apparently are not atomic instructions, but implemented in microcode. */ /* * OpenSolaris OS modifications * * Sun elects to use this software under the BSD license. * * This source originates from OpenSSL file sha512-x86_64.pl at * ftp://ftp.openssl.org/snapshot/openssl-0.9.8-stable-SNAP-20080131.tar.gz * (presumably for future OpenSSL release 0.9.8h), with these changes: * * 1. Added perl "use strict" and declared variables. * * 2. Added OpenSolaris ENTRY_NP/SET_SIZE macros from * /usr/include/sys/asm_linkage.h, .ident keywords, and lint(1B) guards. * * 3. Removed x86_64-xlate.pl script (not needed for as(1) or gas(1) * assemblers). Replaced the .picmeup macro with assembler code. * * 4. Added 8 to $ctx, as OpenSolaris OS has an extra 4-byte field, "algotype", * at the beginning of SHA2_CTX (the next field is 8-byte aligned). */ /* * This file was generated by a perl script (sha512-x86_64.pl) that were * used to generate sha256 and sha512 variants from the same code base. * The comments from the original file have been pasted above. */ #if defined(lint) || defined(__lint) #include #include void SHA256TransformBlocks(SHA2_CTX *ctx, const void *in, size_t num) { (void) ctx, (void) in, (void) num; } #else #define _ASM #include ENTRY_NP(SHA256TransformBlocks) .cfi_startproc ENDBR movq %rsp, %rax .cfi_def_cfa_register %rax push %rbx .cfi_offset %rbx,-16 push %rbp .cfi_offset %rbp,-24 push %r12 .cfi_offset %r12,-32 push %r13 .cfi_offset %r13,-40 push %r14 .cfi_offset %r14,-48 push %r15 .cfi_offset %r15,-56 mov %rsp,%rbp # copy %rsp shl $4,%rdx # num*16 sub $16*4+4*8,%rsp lea (%rsi,%rdx,4),%rdx # inp+num*16*4 and $-64,%rsp # align stack frame add $8,%rdi # Skip OpenSolaris field, "algotype" mov %rdi,16*4+0*8(%rsp) # save ctx, 1st arg mov %rsi,16*4+1*8(%rsp) # save inp, 2nd arg mov %rdx,16*4+2*8(%rsp) # save end pointer, "3rd" arg mov %rbp,16*4+3*8(%rsp) # save copy of %rsp # echo ".cfi_cfa_expression %rsp+88,deref,+56" | # openssl/crypto/perlasm/x86_64-xlate.pl .cfi_escape 0x0f,0x06,0x77,0xd8,0x00,0x06,0x23,0x38 #.picmeup %rbp # The .picmeup pseudo-directive, from perlasm/x86_64_xlate.pl, puts # the address of the "next" instruction into the target register # (%rbp). 
This generates these 2 instructions: lea .Llea(%rip),%rbp #nop # .picmeup generates a nop for mod 8 alignment--not needed here .Llea: lea K256-.(%rbp),%rbp mov 4*0(%rdi),%eax mov 4*1(%rdi),%ebx mov 4*2(%rdi),%ecx mov 4*3(%rdi),%edx mov 4*4(%rdi),%r8d mov 4*5(%rdi),%r9d mov 4*6(%rdi),%r10d mov 4*7(%rdi),%r11d jmp .Lloop .balign 16 .Lloop: xor %rdi,%rdi mov 4*0(%rsi),%r12d bswap %r12d mov %r8d,%r13d mov %r8d,%r14d mov %r9d,%r15d ror $6,%r13d ror $11,%r14d xor %r10d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r8d,%r15d # (f^g)&e mov %r12d,0(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r10d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r11d,%r12d # T1+=h mov %eax,%r11d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %eax,%r13d mov %eax,%r14d ror $2,%r11d ror $13,%r13d mov %eax,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r11d ror $9,%r13d or %ecx,%r14d # a|c xor %r13d,%r11d # h=Sigma0(a) and %ecx,%r15d # a&c add %r12d,%edx # d+=T1 and %ebx,%r14d # (a|c)&b add %r12d,%r11d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%r11d # h+=Maj(a,b,c) mov 4*1(%rsi),%r12d bswap %r12d mov %edx,%r13d mov %edx,%r14d mov %r8d,%r15d ror $6,%r13d ror $11,%r14d xor %r9d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %edx,%r15d # (f^g)&e mov %r12d,4(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r9d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r10d,%r12d # T1+=h mov %r11d,%r10d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r11d,%r13d mov %r11d,%r14d ror $2,%r10d ror $13,%r13d mov %r11d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r10d ror $9,%r13d or %ebx,%r14d # a|c xor %r13d,%r10d # h=Sigma0(a) and %ebx,%r15d # a&c add %r12d,%ecx # d+=T1 and %eax,%r14d # (a|c)&b add %r12d,%r10d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%r10d # h+=Maj(a,b,c) mov 4*2(%rsi),%r12d bswap %r12d mov %ecx,%r13d mov %ecx,%r14d mov %edx,%r15d ror $6,%r13d ror $11,%r14d xor %r8d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %ecx,%r15d # (f^g)&e mov %r12d,8(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r8d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r9d,%r12d # T1+=h mov %r10d,%r9d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r10d,%r13d mov %r10d,%r14d ror $2,%r9d ror $13,%r13d mov %r10d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r9d ror $9,%r13d or %eax,%r14d # a|c xor %r13d,%r9d # h=Sigma0(a) and %eax,%r15d # a&c add %r12d,%ebx # d+=T1 and %r11d,%r14d # (a|c)&b add %r12d,%r9d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%r9d # h+=Maj(a,b,c) mov 4*3(%rsi),%r12d bswap %r12d mov %ebx,%r13d mov %ebx,%r14d mov %ecx,%r15d ror $6,%r13d ror $11,%r14d xor %edx,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %ebx,%r15d # (f^g)&e mov %r12d,12(%rsp) xor %r14d,%r13d # Sigma1(e) xor %edx,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r8d,%r12d # T1+=h mov %r9d,%r8d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r9d,%r13d mov %r9d,%r14d ror $2,%r8d ror $13,%r13d mov %r9d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r8d ror $9,%r13d or %r11d,%r14d # a|c xor %r13d,%r8d # h=Sigma0(a) and %r11d,%r15d # a&c add %r12d,%eax # d+=T1 and %r10d,%r14d # (a|c)&b add %r12d,%r8d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%r8d # h+=Maj(a,b,c) mov 4*4(%rsi),%r12d bswap %r12d mov %eax,%r13d mov %eax,%r14d mov %ebx,%r15d ror $6,%r13d ror $11,%r14d xor %ecx,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %eax,%r15d # (f^g)&e mov 
%r12d,16(%rsp) xor %r14d,%r13d # Sigma1(e) xor %ecx,%r15d # Ch(e,f,g)=((f^g)&e)^g add %edx,%r12d # T1+=h mov %r8d,%edx add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r8d,%r13d mov %r8d,%r14d ror $2,%edx ror $13,%r13d mov %r8d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%edx ror $9,%r13d or %r10d,%r14d # a|c xor %r13d,%edx # h=Sigma0(a) and %r10d,%r15d # a&c add %r12d,%r11d # d+=T1 and %r9d,%r14d # (a|c)&b add %r12d,%edx # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%edx # h+=Maj(a,b,c) mov 4*5(%rsi),%r12d bswap %r12d mov %r11d,%r13d mov %r11d,%r14d mov %eax,%r15d ror $6,%r13d ror $11,%r14d xor %ebx,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r11d,%r15d # (f^g)&e mov %r12d,20(%rsp) xor %r14d,%r13d # Sigma1(e) xor %ebx,%r15d # Ch(e,f,g)=((f^g)&e)^g add %ecx,%r12d # T1+=h mov %edx,%ecx add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %edx,%r13d mov %edx,%r14d ror $2,%ecx ror $13,%r13d mov %edx,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%ecx ror $9,%r13d or %r9d,%r14d # a|c xor %r13d,%ecx # h=Sigma0(a) and %r9d,%r15d # a&c add %r12d,%r10d # d+=T1 and %r8d,%r14d # (a|c)&b add %r12d,%ecx # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%ecx # h+=Maj(a,b,c) mov 4*6(%rsi),%r12d bswap %r12d mov %r10d,%r13d mov %r10d,%r14d mov %r11d,%r15d ror $6,%r13d ror $11,%r14d xor %eax,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r10d,%r15d # (f^g)&e mov %r12d,24(%rsp) xor %r14d,%r13d # Sigma1(e) xor %eax,%r15d # Ch(e,f,g)=((f^g)&e)^g add %ebx,%r12d # T1+=h mov %ecx,%ebx add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %ecx,%r13d mov %ecx,%r14d ror $2,%ebx ror $13,%r13d mov %ecx,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%ebx ror $9,%r13d or %r8d,%r14d # a|c xor %r13d,%ebx # h=Sigma0(a) and %r8d,%r15d # a&c add %r12d,%r9d # d+=T1 and %edx,%r14d # (a|c)&b add %r12d,%ebx # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%ebx # h+=Maj(a,b,c) mov 4*7(%rsi),%r12d bswap %r12d mov %r9d,%r13d mov %r9d,%r14d mov %r10d,%r15d ror $6,%r13d ror $11,%r14d xor %r11d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r9d,%r15d # (f^g)&e mov %r12d,28(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r11d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %eax,%r12d # T1+=h mov %ebx,%eax add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %ebx,%r13d mov %ebx,%r14d ror $2,%eax ror $13,%r13d mov %ebx,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%eax ror $9,%r13d or %edx,%r14d # a|c xor %r13d,%eax # h=Sigma0(a) and %edx,%r15d # a&c add %r12d,%r8d # d+=T1 and %ecx,%r14d # (a|c)&b add %r12d,%eax # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%eax # h+=Maj(a,b,c) mov 4*8(%rsi),%r12d bswap %r12d mov %r8d,%r13d mov %r8d,%r14d mov %r9d,%r15d ror $6,%r13d ror $11,%r14d xor %r10d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r8d,%r15d # (f^g)&e mov %r12d,32(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r10d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r11d,%r12d # T1+=h mov %eax,%r11d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %eax,%r13d mov %eax,%r14d ror $2,%r11d ror $13,%r13d mov %eax,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r11d ror $9,%r13d or %ecx,%r14d # a|c xor %r13d,%r11d # h=Sigma0(a) and %ecx,%r15d # a&c add %r12d,%edx # d+=T1 and %ebx,%r14d # (a|c)&b add %r12d,%r11d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add 
%r14d,%r11d # h+=Maj(a,b,c) mov 4*9(%rsi),%r12d bswap %r12d mov %edx,%r13d mov %edx,%r14d mov %r8d,%r15d ror $6,%r13d ror $11,%r14d xor %r9d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %edx,%r15d # (f^g)&e mov %r12d,36(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r9d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r10d,%r12d # T1+=h mov %r11d,%r10d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r11d,%r13d mov %r11d,%r14d ror $2,%r10d ror $13,%r13d mov %r11d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r10d ror $9,%r13d or %ebx,%r14d # a|c xor %r13d,%r10d # h=Sigma0(a) and %ebx,%r15d # a&c add %r12d,%ecx # d+=T1 and %eax,%r14d # (a|c)&b add %r12d,%r10d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%r10d # h+=Maj(a,b,c) mov 4*10(%rsi),%r12d bswap %r12d mov %ecx,%r13d mov %ecx,%r14d mov %edx,%r15d ror $6,%r13d ror $11,%r14d xor %r8d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %ecx,%r15d # (f^g)&e mov %r12d,40(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r8d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r9d,%r12d # T1+=h mov %r10d,%r9d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r10d,%r13d mov %r10d,%r14d ror $2,%r9d ror $13,%r13d mov %r10d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r9d ror $9,%r13d or %eax,%r14d # a|c xor %r13d,%r9d # h=Sigma0(a) and %eax,%r15d # a&c add %r12d,%ebx # d+=T1 and %r11d,%r14d # (a|c)&b add %r12d,%r9d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%r9d # h+=Maj(a,b,c) mov 4*11(%rsi),%r12d bswap %r12d mov %ebx,%r13d mov %ebx,%r14d mov %ecx,%r15d ror $6,%r13d ror $11,%r14d xor %edx,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %ebx,%r15d # (f^g)&e mov %r12d,44(%rsp) xor %r14d,%r13d # Sigma1(e) xor %edx,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r8d,%r12d # T1+=h mov %r9d,%r8d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r9d,%r13d mov %r9d,%r14d ror $2,%r8d ror $13,%r13d mov %r9d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r8d ror $9,%r13d or %r11d,%r14d # a|c xor %r13d,%r8d # h=Sigma0(a) and %r11d,%r15d # a&c add %r12d,%eax # d+=T1 and %r10d,%r14d # (a|c)&b add %r12d,%r8d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%r8d # h+=Maj(a,b,c) mov 4*12(%rsi),%r12d bswap %r12d mov %eax,%r13d mov %eax,%r14d mov %ebx,%r15d ror $6,%r13d ror $11,%r14d xor %ecx,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %eax,%r15d # (f^g)&e mov %r12d,48(%rsp) xor %r14d,%r13d # Sigma1(e) xor %ecx,%r15d # Ch(e,f,g)=((f^g)&e)^g add %edx,%r12d # T1+=h mov %r8d,%edx add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r8d,%r13d mov %r8d,%r14d ror $2,%edx ror $13,%r13d mov %r8d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%edx ror $9,%r13d or %r10d,%r14d # a|c xor %r13d,%edx # h=Sigma0(a) and %r10d,%r15d # a&c add %r12d,%r11d # d+=T1 and %r9d,%r14d # (a|c)&b add %r12d,%edx # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%edx # h+=Maj(a,b,c) mov 4*13(%rsi),%r12d bswap %r12d mov %r11d,%r13d mov %r11d,%r14d mov %eax,%r15d ror $6,%r13d ror $11,%r14d xor %ebx,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r11d,%r15d # (f^g)&e mov %r12d,52(%rsp) xor %r14d,%r13d # Sigma1(e) xor %ebx,%r15d # Ch(e,f,g)=((f^g)&e)^g add %ecx,%r12d # T1+=h mov %edx,%ecx add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %edx,%r13d mov %edx,%r14d ror $2,%ecx ror $13,%r13d mov %edx,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%ecx ror $9,%r13d 
or %r9d,%r14d # a|c xor %r13d,%ecx # h=Sigma0(a) and %r9d,%r15d # a&c add %r12d,%r10d # d+=T1 and %r8d,%r14d # (a|c)&b add %r12d,%ecx # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%ecx # h+=Maj(a,b,c) mov 4*14(%rsi),%r12d bswap %r12d mov %r10d,%r13d mov %r10d,%r14d mov %r11d,%r15d ror $6,%r13d ror $11,%r14d xor %eax,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r10d,%r15d # (f^g)&e mov %r12d,56(%rsp) xor %r14d,%r13d # Sigma1(e) xor %eax,%r15d # Ch(e,f,g)=((f^g)&e)^g add %ebx,%r12d # T1+=h mov %ecx,%ebx add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %ecx,%r13d mov %ecx,%r14d ror $2,%ebx ror $13,%r13d mov %ecx,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%ebx ror $9,%r13d or %r8d,%r14d # a|c xor %r13d,%ebx # h=Sigma0(a) and %r8d,%r15d # a&c add %r12d,%r9d # d+=T1 and %edx,%r14d # (a|c)&b add %r12d,%ebx # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%ebx # h+=Maj(a,b,c) mov 4*15(%rsi),%r12d bswap %r12d mov %r9d,%r13d mov %r9d,%r14d mov %r10d,%r15d ror $6,%r13d ror $11,%r14d xor %r11d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r9d,%r15d # (f^g)&e mov %r12d,60(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r11d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %eax,%r12d # T1+=h mov %ebx,%eax add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %ebx,%r13d mov %ebx,%r14d ror $2,%eax ror $13,%r13d mov %ebx,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%eax ror $9,%r13d or %edx,%r14d # a|c xor %r13d,%eax # h=Sigma0(a) and %edx,%r15d # a&c add %r12d,%r8d # d+=T1 and %ecx,%r14d # (a|c)&b add %r12d,%eax # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%eax # h+=Maj(a,b,c) jmp .Lrounds_16_xx .balign 16 .Lrounds_16_xx: mov 4(%rsp),%r13d mov 56(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 36(%rsp),%r12d add 0(%rsp),%r12d mov %r8d,%r13d mov %r8d,%r14d mov %r9d,%r15d ror $6,%r13d ror $11,%r14d xor %r10d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r8d,%r15d # (f^g)&e mov %r12d,0(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r10d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r11d,%r12d # T1+=h mov %eax,%r11d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %eax,%r13d mov %eax,%r14d ror $2,%r11d ror $13,%r13d mov %eax,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r11d ror $9,%r13d or %ecx,%r14d # a|c xor %r13d,%r11d # h=Sigma0(a) and %ecx,%r15d # a&c add %r12d,%edx # d+=T1 and %ebx,%r14d # (a|c)&b add %r12d,%r11d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%r11d # h+=Maj(a,b,c) mov 8(%rsp),%r13d mov 60(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 40(%rsp),%r12d add 4(%rsp),%r12d mov %edx,%r13d mov %edx,%r14d mov %r8d,%r15d ror $6,%r13d ror $11,%r14d xor %r9d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %edx,%r15d # (f^g)&e mov %r12d,4(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r9d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r10d,%r12d # T1+=h mov %r11d,%r10d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r11d,%r13d mov %r11d,%r14d ror $2,%r10d ror $13,%r13d mov 
%r11d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r10d ror $9,%r13d or %ebx,%r14d # a|c xor %r13d,%r10d # h=Sigma0(a) and %ebx,%r15d # a&c add %r12d,%ecx # d+=T1 and %eax,%r14d # (a|c)&b add %r12d,%r10d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%r10d # h+=Maj(a,b,c) mov 12(%rsp),%r13d mov 0(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 44(%rsp),%r12d add 8(%rsp),%r12d mov %ecx,%r13d mov %ecx,%r14d mov %edx,%r15d ror $6,%r13d ror $11,%r14d xor %r8d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %ecx,%r15d # (f^g)&e mov %r12d,8(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r8d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r9d,%r12d # T1+=h mov %r10d,%r9d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r10d,%r13d mov %r10d,%r14d ror $2,%r9d ror $13,%r13d mov %r10d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r9d ror $9,%r13d or %eax,%r14d # a|c xor %r13d,%r9d # h=Sigma0(a) and %eax,%r15d # a&c add %r12d,%ebx # d+=T1 and %r11d,%r14d # (a|c)&b add %r12d,%r9d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%r9d # h+=Maj(a,b,c) mov 16(%rsp),%r13d mov 4(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 48(%rsp),%r12d add 12(%rsp),%r12d mov %ebx,%r13d mov %ebx,%r14d mov %ecx,%r15d ror $6,%r13d ror $11,%r14d xor %edx,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %ebx,%r15d # (f^g)&e mov %r12d,12(%rsp) xor %r14d,%r13d # Sigma1(e) xor %edx,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r8d,%r12d # T1+=h mov %r9d,%r8d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r9d,%r13d mov %r9d,%r14d ror $2,%r8d ror $13,%r13d mov %r9d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r8d ror $9,%r13d or %r11d,%r14d # a|c xor %r13d,%r8d # h=Sigma0(a) and %r11d,%r15d # a&c add %r12d,%eax # d+=T1 and %r10d,%r14d # (a|c)&b add %r12d,%r8d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%r8d # h+=Maj(a,b,c) mov 20(%rsp),%r13d mov 8(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 52(%rsp),%r12d add 16(%rsp),%r12d mov %eax,%r13d mov %eax,%r14d mov %ebx,%r15d ror $6,%r13d ror $11,%r14d xor %ecx,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %eax,%r15d # (f^g)&e mov %r12d,16(%rsp) xor %r14d,%r13d # Sigma1(e) xor %ecx,%r15d # Ch(e,f,g)=((f^g)&e)^g add %edx,%r12d # T1+=h mov %r8d,%edx add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r8d,%r13d mov %r8d,%r14d ror $2,%edx ror $13,%r13d mov %r8d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%edx ror $9,%r13d or %r10d,%r14d # a|c xor %r13d,%edx # h=Sigma0(a) and %r10d,%r15d # a&c add %r12d,%r11d # d+=T1 and %r9d,%r14d # (a|c)&b add %r12d,%edx # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%edx # h+=Maj(a,b,c) mov 24(%rsp),%r13d mov 12(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d 
# sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 56(%rsp),%r12d add 20(%rsp),%r12d mov %r11d,%r13d mov %r11d,%r14d mov %eax,%r15d ror $6,%r13d ror $11,%r14d xor %ebx,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r11d,%r15d # (f^g)&e mov %r12d,20(%rsp) xor %r14d,%r13d # Sigma1(e) xor %ebx,%r15d # Ch(e,f,g)=((f^g)&e)^g add %ecx,%r12d # T1+=h mov %edx,%ecx add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %edx,%r13d mov %edx,%r14d ror $2,%ecx ror $13,%r13d mov %edx,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%ecx ror $9,%r13d or %r9d,%r14d # a|c xor %r13d,%ecx # h=Sigma0(a) and %r9d,%r15d # a&c add %r12d,%r10d # d+=T1 and %r8d,%r14d # (a|c)&b add %r12d,%ecx # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%ecx # h+=Maj(a,b,c) mov 28(%rsp),%r13d mov 16(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 60(%rsp),%r12d add 24(%rsp),%r12d mov %r10d,%r13d mov %r10d,%r14d mov %r11d,%r15d ror $6,%r13d ror $11,%r14d xor %eax,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r10d,%r15d # (f^g)&e mov %r12d,24(%rsp) xor %r14d,%r13d # Sigma1(e) xor %eax,%r15d # Ch(e,f,g)=((f^g)&e)^g add %ebx,%r12d # T1+=h mov %ecx,%ebx add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %ecx,%r13d mov %ecx,%r14d ror $2,%ebx ror $13,%r13d mov %ecx,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%ebx ror $9,%r13d or %r8d,%r14d # a|c xor %r13d,%ebx # h=Sigma0(a) and %r8d,%r15d # a&c add %r12d,%r9d # d+=T1 and %edx,%r14d # (a|c)&b add %r12d,%ebx # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%ebx # h+=Maj(a,b,c) mov 32(%rsp),%r13d mov 20(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 0(%rsp),%r12d add 28(%rsp),%r12d mov %r9d,%r13d mov %r9d,%r14d mov %r10d,%r15d ror $6,%r13d ror $11,%r14d xor %r11d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r9d,%r15d # (f^g)&e mov %r12d,28(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r11d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %eax,%r12d # T1+=h mov %ebx,%eax add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %ebx,%r13d mov %ebx,%r14d ror $2,%eax ror $13,%r13d mov %ebx,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%eax ror $9,%r13d or %edx,%r14d # a|c xor %r13d,%eax # h=Sigma0(a) and %edx,%r15d # a&c add %r12d,%r8d # d+=T1 and %ecx,%r14d # (a|c)&b add %r12d,%eax # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%eax # h+=Maj(a,b,c) mov 36(%rsp),%r13d mov 24(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 4(%rsp),%r12d add 32(%rsp),%r12d mov %r8d,%r13d mov %r8d,%r14d mov %r9d,%r15d ror $6,%r13d ror $11,%r14d xor %r10d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r8d,%r15d # (f^g)&e mov %r12d,32(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r10d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r11d,%r12d # T1+=h 
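	# For readability: each unrolled block above and below implements
	# the standard SHA-256 round
	#	T1 = h + Sigma1(e) + Ch(e,f,g) + K[round] + W[round]
	#	T2 = Sigma0(a) + Maj(a,b,c)
	#	d += T1;  h = T1 + T2
	# with the remaining state words rotating by register renaming
	# rather than by explicit moves.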
mov %eax,%r11d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %eax,%r13d mov %eax,%r14d ror $2,%r11d ror $13,%r13d mov %eax,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r11d ror $9,%r13d or %ecx,%r14d # a|c xor %r13d,%r11d # h=Sigma0(a) and %ecx,%r15d # a&c add %r12d,%edx # d+=T1 and %ebx,%r14d # (a|c)&b add %r12d,%r11d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%r11d # h+=Maj(a,b,c) mov 40(%rsp),%r13d mov 28(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 8(%rsp),%r12d add 36(%rsp),%r12d mov %edx,%r13d mov %edx,%r14d mov %r8d,%r15d ror $6,%r13d ror $11,%r14d xor %r9d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %edx,%r15d # (f^g)&e mov %r12d,36(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r9d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r10d,%r12d # T1+=h mov %r11d,%r10d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r11d,%r13d mov %r11d,%r14d ror $2,%r10d ror $13,%r13d mov %r11d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r10d ror $9,%r13d or %ebx,%r14d # a|c xor %r13d,%r10d # h=Sigma0(a) and %ebx,%r15d # a&c add %r12d,%ecx # d+=T1 and %eax,%r14d # (a|c)&b add %r12d,%r10d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%r10d # h+=Maj(a,b,c) mov 44(%rsp),%r13d mov 32(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 12(%rsp),%r12d add 40(%rsp),%r12d mov %ecx,%r13d mov %ecx,%r14d mov %edx,%r15d ror $6,%r13d ror $11,%r14d xor %r8d,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %ecx,%r15d # (f^g)&e mov %r12d,40(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r8d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r9d,%r12d # T1+=h mov %r10d,%r9d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r10d,%r13d mov %r10d,%r14d ror $2,%r9d ror $13,%r13d mov %r10d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r9d ror $9,%r13d or %eax,%r14d # a|c xor %r13d,%r9d # h=Sigma0(a) and %eax,%r15d # a&c add %r12d,%ebx # d+=T1 and %r11d,%r14d # (a|c)&b add %r12d,%r9d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%r9d # h+=Maj(a,b,c) mov 48(%rsp),%r13d mov 36(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 16(%rsp),%r12d add 44(%rsp),%r12d mov %ebx,%r13d mov %ebx,%r14d mov %ecx,%r15d ror $6,%r13d ror $11,%r14d xor %edx,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %ebx,%r15d # (f^g)&e mov %r12d,44(%rsp) xor %r14d,%r13d # Sigma1(e) xor %edx,%r15d # Ch(e,f,g)=((f^g)&e)^g add %r8d,%r12d # T1+=h mov %r9d,%r8d add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r9d,%r13d mov %r9d,%r14d ror $2,%r8d ror $13,%r13d mov %r9d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%r8d ror $9,%r13d or %r11d,%r14d # a|c xor %r13d,%r8d # h=Sigma0(a) and %r11d,%r15d # a&c add %r12d,%eax # d+=T1 and %r10d,%r14d # (a|c)&b add %r12d,%r8d # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add 
%r14d,%r8d # h+=Maj(a,b,c) mov 52(%rsp),%r13d mov 40(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 20(%rsp),%r12d add 48(%rsp),%r12d mov %eax,%r13d mov %eax,%r14d mov %ebx,%r15d ror $6,%r13d ror $11,%r14d xor %ecx,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %eax,%r15d # (f^g)&e mov %r12d,48(%rsp) xor %r14d,%r13d # Sigma1(e) xor %ecx,%r15d # Ch(e,f,g)=((f^g)&e)^g add %edx,%r12d # T1+=h mov %r8d,%edx add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %r8d,%r13d mov %r8d,%r14d ror $2,%edx ror $13,%r13d mov %r8d,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%edx ror $9,%r13d or %r10d,%r14d # a|c xor %r13d,%edx # h=Sigma0(a) and %r10d,%r15d # a&c add %r12d,%r11d # d+=T1 and %r9d,%r14d # (a|c)&b add %r12d,%edx # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%edx # h+=Maj(a,b,c) mov 56(%rsp),%r13d mov 44(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 24(%rsp),%r12d add 52(%rsp),%r12d mov %r11d,%r13d mov %r11d,%r14d mov %eax,%r15d ror $6,%r13d ror $11,%r14d xor %ebx,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r11d,%r15d # (f^g)&e mov %r12d,52(%rsp) xor %r14d,%r13d # Sigma1(e) xor %ebx,%r15d # Ch(e,f,g)=((f^g)&e)^g add %ecx,%r12d # T1+=h mov %edx,%ecx add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %edx,%r13d mov %edx,%r14d ror $2,%ecx ror $13,%r13d mov %edx,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%ecx ror $9,%r13d or %r9d,%r14d # a|c xor %r13d,%ecx # h=Sigma0(a) and %r9d,%r15d # a&c add %r12d,%r10d # d+=T1 and %r8d,%r14d # (a|c)&b add %r12d,%ecx # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%ecx # h+=Maj(a,b,c) mov 60(%rsp),%r13d mov 48(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 28(%rsp),%r12d add 56(%rsp),%r12d mov %r10d,%r13d mov %r10d,%r14d mov %r11d,%r15d ror $6,%r13d ror $11,%r14d xor %eax,%r15d # f^g xor %r14d,%r13d ror $14,%r14d and %r10d,%r15d # (f^g)&e mov %r12d,56(%rsp) xor %r14d,%r13d # Sigma1(e) xor %eax,%r15d # Ch(e,f,g)=((f^g)&e)^g add %ebx,%r12d # T1+=h mov %ecx,%ebx add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %ecx,%r13d mov %ecx,%r14d ror $2,%ebx ror $13,%r13d mov %ecx,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%ebx ror $9,%r13d or %r8d,%r14d # a|c xor %r13d,%ebx # h=Sigma0(a) and %r8d,%r15d # a&c add %r12d,%r9d # d+=T1 and %edx,%r14d # (a|c)&b add %r12d,%ebx # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%ebx # h+=Maj(a,b,c) mov 0(%rsp),%r13d mov 52(%rsp),%r12d mov %r13d,%r15d shr $3,%r13d ror $7,%r15d xor %r15d,%r13d ror $11,%r15d xor %r15d,%r13d # sigma0(X[(i+1)&0xf]) mov %r12d,%r14d shr $10,%r12d ror $17,%r14d xor %r14d,%r12d ror $2,%r14d xor %r14d,%r12d # sigma1(X[(i+14)&0xf]) add %r13d,%r12d add 32(%rsp),%r12d add 60(%rsp),%r12d mov %r9d,%r13d mov %r9d,%r14d mov %r10d,%r15d ror $6,%r13d ror $11,%r14d xor %r11d,%r15d # f^g xor 
%r14d,%r13d ror $14,%r14d and %r9d,%r15d # (f^g)&e mov %r12d,60(%rsp) xor %r14d,%r13d # Sigma1(e) xor %r11d,%r15d # Ch(e,f,g)=((f^g)&e)^g add %eax,%r12d # T1+=h mov %ebx,%eax add %r13d,%r12d # T1+=Sigma1(e) add %r15d,%r12d # T1+=Ch(e,f,g) mov %ebx,%r13d mov %ebx,%r14d ror $2,%eax ror $13,%r13d mov %ebx,%r15d add (%rbp,%rdi,4),%r12d # T1+=K[round] xor %r13d,%eax ror $9,%r13d or %edx,%r14d # a|c xor %r13d,%eax # h=Sigma0(a) and %edx,%r15d # a&c add %r12d,%r8d # d+=T1 and %ecx,%r14d # (a|c)&b add %r12d,%eax # h+=T1 or %r15d,%r14d # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14d,%eax # h+=Maj(a,b,c) cmp $64,%rdi jb .Lrounds_16_xx mov 16*4+0*8(%rsp),%rdi lea 16*4(%rsi),%rsi add 4*0(%rdi),%eax add 4*1(%rdi),%ebx add 4*2(%rdi),%ecx add 4*3(%rdi),%edx add 4*4(%rdi),%r8d add 4*5(%rdi),%r9d add 4*6(%rdi),%r10d add 4*7(%rdi),%r11d cmp 16*4+2*8(%rsp),%rsi mov %eax,4*0(%rdi) mov %ebx,4*1(%rdi) mov %ecx,4*2(%rdi) mov %edx,4*3(%rdi) mov %r8d,4*4(%rdi) mov %r9d,4*5(%rdi) mov %r10d,4*6(%rdi) mov %r11d,4*7(%rdi) jb .Lloop mov 16*4+3*8(%rsp),%rsp .cfi_def_cfa %rsp,56 pop %r15 .cfi_adjust_cfa_offset -8 .cfi_restore %r15 pop %r14 .cfi_adjust_cfa_offset -8 .cfi_restore %r14 pop %r13 .cfi_adjust_cfa_offset -8 .cfi_restore %r13 pop %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 pop %rbp .cfi_adjust_cfa_offset -8 .cfi_restore %rbp pop %rbx .cfi_adjust_cfa_offset -8 .cfi_restore %rbx RET .cfi_endproc SET_SIZE(SHA256TransformBlocks) -.section .rodata +SECTION_STATIC .balign 64 SET_OBJ(K256) K256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 #endif /* !lint && !__lint */ #ifdef __ELF__ .section .note.GNU-stack,"",%progbits #endif diff --git a/module/icp/asm-x86_64/sha2/sha512_impl.S b/module/icp/asm-x86_64/sha2/sha512_impl.S index 520f5b6dab24..b2f7d4863d8a 100644 --- a/module/icp/asm-x86_64/sha2/sha512_impl.S +++ b/module/icp/asm-x86_64/sha2/sha512_impl.S @@ -1,2116 +1,2115 @@ /* * ==================================================================== * Written by Andy Polyakov for the OpenSSL * project. Rights for redistribution and usage in source and binary * forms are granted according to the OpenSSL license. * ==================================================================== * * sha256/512_block procedure for x86_64. * * 40% improvement over compiler-generated code on Opteron. On EM64T * sha256 was observed to run >80% faster and sha512 - >40%. No magical * tricks, just straight implementation... I really wonder why gcc * [being armed with inline assembler] fails to generate as fast code. * The only thing which is cool about this module is that it's very * same instruction sequence used for both SHA-256 and SHA-512. In * former case the instructions operate on 32-bit operands, while in * latter - on 64-bit ones. 
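The unrolled SHA-256 rounds earlier in this hunk annotate every step of the compression function (the sigma0/sigma1 message-schedule updates, Sigma1(e), Ch(e,f,g), T1+=K[round], Sigma0(a), Maj(a,b,c)); note that the assembly applies its second rotation on top of the first, so, for example, ror $11 after ror $7 is a net rotate by 18. As a reading aid only, here is a minimal scalar C sketch of one such round and of the two schedule functions, with the rotate amounts taken from those ror/shr chains; it is not part of the patch, and the identifiers (rotr32, sigma0_256, sigma1_256, sha256_round) are illustrative.

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, unsigned n)
{
        return ((x >> n) | (x << (32 - n)));
}

/* "# sigma0(X[(i+1)&0xf])": shr $3 combined with net rotates by 7 and 18 */
static inline uint32_t sigma0_256(uint32_t x)
{
        return (rotr32(x, 7) ^ rotr32(x, 18) ^ (x >> 3));
}

/* "# sigma1(X[(i+14)&0xf])": shr $10 combined with net rotates by 17 and 19 */
static inline uint32_t sigma1_256(uint32_t x)
{
        return (rotr32(x, 17) ^ rotr32(x, 19) ^ (x >> 10));
}

/* One SHA-256 round, using the same formulas as the "#" comments above. */
static void
sha256_round(uint32_t s[8], uint32_t k, uint32_t w)
{
        uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
        uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

        uint32_t ch = ((f ^ g) & e) ^ g;          /* Ch(e,f,g)=((f^g)&e)^g */
        uint32_t maj = ((a | c) & b) | (a & c);   /* Maj(a,b,c)=((a|c)&b)|(a&c) */
        uint32_t S1 = rotr32(e, 6) ^ rotr32(e, 11) ^ rotr32(e, 25);  /* Sigma1(e) */
        uint32_t S0 = rotr32(a, 2) ^ rotr32(a, 13) ^ rotr32(a, 22);  /* Sigma0(a) */

        /* T1 = h + Sigma1(e) + Ch(e,f,g) + K[round] + X[i] */
        uint32_t t1 = h + S1 + ch + k + w;

        s[7] = g; s[6] = f; s[5] = e;
        s[4] = d + t1;                            /* d+=T1 */
        s[3] = c; s[2] = b; s[1] = a;
        s[0] = t1 + S0 + maj;                     /* h=Sigma0(a)+T1+Maj(a,b,c) */
}

The assembly keeps the eight working variables a..h in registers and renames them from round to round instead of shifting an array, but the arithmetic per round is exactly this.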
 * All I had to do is to get one flavor right,
 * the other one passed the test right away:-)
 *
 * sha256_block runs in ~1005 cycles on Opteron, which gives you
 * asymptotic performance of 64*1000/1005=63.7MBps times CPU clock
 * frequency in GHz. sha512_block runs in ~1275 cycles, which results
 * in 128*1000/1275=100MBps per GHz. Is there room for improvement?
 * Well, if you compare it to IA-64 implementation, which maintains
 * X[16] in register bank[!], tends to 4 instructions per CPU clock
 * cycle and runs in 1003 cycles, 1275 is very good result for 3-way
 * issue Opteron pipeline and X[16] maintained in memory. So that *if*
 * there is a way to improve it, *then* the only way would be to try to
 * offload X[16] updates to SSE unit, but that would require "deeper"
 * loop unroll, which in turn would naturally cause size blow-up, not
 * to mention increased complexity! And once again, only *if* it's
 * actually possible to noticeably improve overall ILP, instruction
 * level parallelism, on a given CPU implementation in this case.
 *
 * Special note on Intel EM64T. While Opteron CPU exhibits perfect
 * performance ratio of 1.5 between 64- and 32-bit flavors [see above],
 * [currently available] EM64T CPUs apparently are far from it. On the
 * contrary, 64-bit version, sha512_block, is ~30% *slower* than 32-bit
 * sha256_block:-( This is presumably because 64-bit shifts/rotates
 * apparently are not atomic instructions, but implemented in microcode.
 */
/*
 * OpenSolaris OS modifications
 *
 * Sun elects to use this software under the BSD license.
 *
 * This source originates from OpenSSL file sha512-x86_64.pl at
 * ftp://ftp.openssl.org/snapshot/openssl-0.9.8-stable-SNAP-20080131.tar.gz
 * (presumably for future OpenSSL release 0.9.8h), with these changes:
 *
 * 1. Added perl "use strict" and declared variables.
 *
 * 2. Added OpenSolaris ENTRY_NP/SET_SIZE macros from
 * /usr/include/sys/asm_linkage.h, .ident keywords, and lint(1B) guards.
 *
 * 3. Removed x86_64-xlate.pl script (not needed for as(1) or gas(1)
 * assemblers). Replaced the .picmeup macro with assembler code.
 *
 * 4. Added 8 to $ctx, as OpenSolaris OS has an extra 4-byte field, "algotype",
 * at the beginning of SHA2_CTX (the next field is 8-byte aligned).
 */
/*
 * This file was generated by a perl script (sha512-x86_64.pl) that was
 * used to generate sha256 and sha512 variants from the same code base.
 * The comments from the original file have been pasted above.
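Change 4 above is why the generated function below begins with add $8,%rdi before loading the hash state. The following layout sketch is hypothetical (it is not the real SHA2_CTX definition; only the leading "algotype" field is taken from the comment above) and only illustrates why the state ends up 8 bytes into the context:

#include <stdint.h>
#include <stddef.h>

/* Illustrative only: field names and sizes beyond "algotype" are assumed. */
typedef struct {
        uint32_t algotype;      /* extra OpenSolaris field, 4 bytes */
        /* 4 bytes of implicit padding: the next member is 8-byte aligned */
        uint64_t state[8];      /* the a..h words the assembly loads from ctx+8 */
        /* byte counters, message buffer, etc. omitted */
} sha2_ctx_sketch_t;

/* Matches the "add $8,%rdi # Skip OpenSolaris field" step below. */
_Static_assert(offsetof(sha2_ctx_sketch_t, state) == 8,
        "hash state begins 8 bytes into the context");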
*/ #if defined(lint) || defined(__lint) #include #include void SHA512TransformBlocks(SHA2_CTX *ctx, const void *in, size_t num) { (void) ctx, (void) in, (void) num; } #else #define _ASM #include ENTRY_NP(SHA512TransformBlocks) .cfi_startproc ENDBR movq %rsp, %rax .cfi_def_cfa_register %rax push %rbx .cfi_offset %rbx,-16 push %rbp .cfi_offset %rbp,-24 push %r12 .cfi_offset %r12,-32 push %r13 .cfi_offset %r13,-40 push %r14 .cfi_offset %r14,-48 push %r15 .cfi_offset %r15,-56 mov %rsp,%rbp # copy %rsp shl $4,%rdx # num*16 sub $16*8+4*8,%rsp lea (%rsi,%rdx,8),%rdx # inp+num*16*8 and $-64,%rsp # align stack frame add $8,%rdi # Skip OpenSolaris field, "algotype" mov %rdi,16*8+0*8(%rsp) # save ctx, 1st arg mov %rsi,16*8+1*8(%rsp) # save inp, 2nd arg mov %rdx,16*8+2*8(%rsp) # save end pointer, "3rd" arg mov %rbp,16*8+3*8(%rsp) # save copy of %rsp # echo ".cfi_cfa_expression %rsp+152,deref,+56" | # openssl/crypto/perlasm/x86_64-xlate.pl .cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x38 #.picmeup %rbp # The .picmeup pseudo-directive, from perlasm/x86_64_xlate.pl, puts # the address of the "next" instruction into the target register # (%rbp). This generates these 2 instructions: lea .Llea(%rip),%rbp #nop # .picmeup generates a nop for mod 8 alignment--not needed here .Llea: lea K512-.(%rbp),%rbp mov 8*0(%rdi),%rax mov 8*1(%rdi),%rbx mov 8*2(%rdi),%rcx mov 8*3(%rdi),%rdx mov 8*4(%rdi),%r8 mov 8*5(%rdi),%r9 mov 8*6(%rdi),%r10 mov 8*7(%rdi),%r11 jmp .Lloop .balign 16 .Lloop: xor %rdi,%rdi mov 8*0(%rsi),%r12 bswap %r12 mov %r8,%r13 mov %r8,%r14 mov %r9,%r15 ror $14,%r13 ror $18,%r14 xor %r10,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r8,%r15 # (f^g)&e mov %r12,0(%rsp) xor %r14,%r13 # Sigma1(e) xor %r10,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r11,%r12 # T1+=h mov %rax,%r11 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rax,%r13 mov %rax,%r14 ror $28,%r11 ror $34,%r13 mov %rax,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r11 ror $5,%r13 or %rcx,%r14 # a|c xor %r13,%r11 # h=Sigma0(a) and %rcx,%r15 # a&c add %r12,%rdx # d+=T1 and %rbx,%r14 # (a|c)&b add %r12,%r11 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r11 # h+=Maj(a,b,c) mov 8*1(%rsi),%r12 bswap %r12 mov %rdx,%r13 mov %rdx,%r14 mov %r8,%r15 ror $14,%r13 ror $18,%r14 xor %r9,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rdx,%r15 # (f^g)&e mov %r12,8(%rsp) xor %r14,%r13 # Sigma1(e) xor %r9,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r10,%r12 # T1+=h mov %r11,%r10 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r11,%r13 mov %r11,%r14 ror $28,%r10 ror $34,%r13 mov %r11,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r10 ror $5,%r13 or %rbx,%r14 # a|c xor %r13,%r10 # h=Sigma0(a) and %rbx,%r15 # a&c add %r12,%rcx # d+=T1 and %rax,%r14 # (a|c)&b add %r12,%r10 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r10 # h+=Maj(a,b,c) mov 8*2(%rsi),%r12 bswap %r12 mov %rcx,%r13 mov %rcx,%r14 mov %rdx,%r15 ror $14,%r13 ror $18,%r14 xor %r8,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rcx,%r15 # (f^g)&e mov %r12,16(%rsp) xor %r14,%r13 # Sigma1(e) xor %r8,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r9,%r12 # T1+=h mov %r10,%r9 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r10,%r13 mov %r10,%r14 ror $28,%r9 ror $34,%r13 mov %r10,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r9 ror $5,%r13 or %rax,%r14 # a|c xor %r13,%r9 # h=Sigma0(a) and %rax,%r15 # a&c add %r12,%rbx # d+=T1 and %r11,%r14 # (a|c)&b add %r12,%r9 # h+=T1 or %r15,%r14 # 
Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r9 # h+=Maj(a,b,c) mov 8*3(%rsi),%r12 bswap %r12 mov %rbx,%r13 mov %rbx,%r14 mov %rcx,%r15 ror $14,%r13 ror $18,%r14 xor %rdx,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rbx,%r15 # (f^g)&e mov %r12,24(%rsp) xor %r14,%r13 # Sigma1(e) xor %rdx,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r8,%r12 # T1+=h mov %r9,%r8 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r9,%r13 mov %r9,%r14 ror $28,%r8 ror $34,%r13 mov %r9,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r8 ror $5,%r13 or %r11,%r14 # a|c xor %r13,%r8 # h=Sigma0(a) and %r11,%r15 # a&c add %r12,%rax # d+=T1 and %r10,%r14 # (a|c)&b add %r12,%r8 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r8 # h+=Maj(a,b,c) mov 8*4(%rsi),%r12 bswap %r12 mov %rax,%r13 mov %rax,%r14 mov %rbx,%r15 ror $14,%r13 ror $18,%r14 xor %rcx,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rax,%r15 # (f^g)&e mov %r12,32(%rsp) xor %r14,%r13 # Sigma1(e) xor %rcx,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rdx,%r12 # T1+=h mov %r8,%rdx add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r8,%r13 mov %r8,%r14 ror $28,%rdx ror $34,%r13 mov %r8,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rdx ror $5,%r13 or %r10,%r14 # a|c xor %r13,%rdx # h=Sigma0(a) and %r10,%r15 # a&c add %r12,%r11 # d+=T1 and %r9,%r14 # (a|c)&b add %r12,%rdx # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rdx # h+=Maj(a,b,c) mov 8*5(%rsi),%r12 bswap %r12 mov %r11,%r13 mov %r11,%r14 mov %rax,%r15 ror $14,%r13 ror $18,%r14 xor %rbx,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r11,%r15 # (f^g)&e mov %r12,40(%rsp) xor %r14,%r13 # Sigma1(e) xor %rbx,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rcx,%r12 # T1+=h mov %rdx,%rcx add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rdx,%r13 mov %rdx,%r14 ror $28,%rcx ror $34,%r13 mov %rdx,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rcx ror $5,%r13 or %r9,%r14 # a|c xor %r13,%rcx # h=Sigma0(a) and %r9,%r15 # a&c add %r12,%r10 # d+=T1 and %r8,%r14 # (a|c)&b add %r12,%rcx # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rcx # h+=Maj(a,b,c) mov 8*6(%rsi),%r12 bswap %r12 mov %r10,%r13 mov %r10,%r14 mov %r11,%r15 ror $14,%r13 ror $18,%r14 xor %rax,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r10,%r15 # (f^g)&e mov %r12,48(%rsp) xor %r14,%r13 # Sigma1(e) xor %rax,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rbx,%r12 # T1+=h mov %rcx,%rbx add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rcx,%r13 mov %rcx,%r14 ror $28,%rbx ror $34,%r13 mov %rcx,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rbx ror $5,%r13 or %r8,%r14 # a|c xor %r13,%rbx # h=Sigma0(a) and %r8,%r15 # a&c add %r12,%r9 # d+=T1 and %rdx,%r14 # (a|c)&b add %r12,%rbx # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rbx # h+=Maj(a,b,c) mov 8*7(%rsi),%r12 bswap %r12 mov %r9,%r13 mov %r9,%r14 mov %r10,%r15 ror $14,%r13 ror $18,%r14 xor %r11,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r9,%r15 # (f^g)&e mov %r12,56(%rsp) xor %r14,%r13 # Sigma1(e) xor %r11,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rax,%r12 # T1+=h mov %rbx,%rax add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rbx,%r13 mov %rbx,%r14 ror $28,%rax ror $34,%r13 mov %rbx,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rax ror $5,%r13 or %rdx,%r14 # a|c xor %r13,%rax # h=Sigma0(a) and %rdx,%r15 # a&c add %r12,%r8 # d+=T1 and %rcx,%r14 # (a|c)&b add %r12,%rax # h+=T1 or %r15,%r14 # 
Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rax # h+=Maj(a,b,c) mov 8*8(%rsi),%r12 bswap %r12 mov %r8,%r13 mov %r8,%r14 mov %r9,%r15 ror $14,%r13 ror $18,%r14 xor %r10,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r8,%r15 # (f^g)&e mov %r12,64(%rsp) xor %r14,%r13 # Sigma1(e) xor %r10,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r11,%r12 # T1+=h mov %rax,%r11 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rax,%r13 mov %rax,%r14 ror $28,%r11 ror $34,%r13 mov %rax,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r11 ror $5,%r13 or %rcx,%r14 # a|c xor %r13,%r11 # h=Sigma0(a) and %rcx,%r15 # a&c add %r12,%rdx # d+=T1 and %rbx,%r14 # (a|c)&b add %r12,%r11 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r11 # h+=Maj(a,b,c) mov 8*9(%rsi),%r12 bswap %r12 mov %rdx,%r13 mov %rdx,%r14 mov %r8,%r15 ror $14,%r13 ror $18,%r14 xor %r9,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rdx,%r15 # (f^g)&e mov %r12,72(%rsp) xor %r14,%r13 # Sigma1(e) xor %r9,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r10,%r12 # T1+=h mov %r11,%r10 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r11,%r13 mov %r11,%r14 ror $28,%r10 ror $34,%r13 mov %r11,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r10 ror $5,%r13 or %rbx,%r14 # a|c xor %r13,%r10 # h=Sigma0(a) and %rbx,%r15 # a&c add %r12,%rcx # d+=T1 and %rax,%r14 # (a|c)&b add %r12,%r10 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r10 # h+=Maj(a,b,c) mov 8*10(%rsi),%r12 bswap %r12 mov %rcx,%r13 mov %rcx,%r14 mov %rdx,%r15 ror $14,%r13 ror $18,%r14 xor %r8,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rcx,%r15 # (f^g)&e mov %r12,80(%rsp) xor %r14,%r13 # Sigma1(e) xor %r8,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r9,%r12 # T1+=h mov %r10,%r9 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r10,%r13 mov %r10,%r14 ror $28,%r9 ror $34,%r13 mov %r10,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r9 ror $5,%r13 or %rax,%r14 # a|c xor %r13,%r9 # h=Sigma0(a) and %rax,%r15 # a&c add %r12,%rbx # d+=T1 and %r11,%r14 # (a|c)&b add %r12,%r9 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r9 # h+=Maj(a,b,c) mov 8*11(%rsi),%r12 bswap %r12 mov %rbx,%r13 mov %rbx,%r14 mov %rcx,%r15 ror $14,%r13 ror $18,%r14 xor %rdx,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rbx,%r15 # (f^g)&e mov %r12,88(%rsp) xor %r14,%r13 # Sigma1(e) xor %rdx,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r8,%r12 # T1+=h mov %r9,%r8 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r9,%r13 mov %r9,%r14 ror $28,%r8 ror $34,%r13 mov %r9,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r8 ror $5,%r13 or %r11,%r14 # a|c xor %r13,%r8 # h=Sigma0(a) and %r11,%r15 # a&c add %r12,%rax # d+=T1 and %r10,%r14 # (a|c)&b add %r12,%r8 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r8 # h+=Maj(a,b,c) mov 8*12(%rsi),%r12 bswap %r12 mov %rax,%r13 mov %rax,%r14 mov %rbx,%r15 ror $14,%r13 ror $18,%r14 xor %rcx,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rax,%r15 # (f^g)&e mov %r12,96(%rsp) xor %r14,%r13 # Sigma1(e) xor %rcx,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rdx,%r12 # T1+=h mov %r8,%rdx add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r8,%r13 mov %r8,%r14 ror $28,%rdx ror $34,%r13 mov %r8,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rdx ror $5,%r13 or %r10,%r14 # a|c xor %r13,%rdx # h=Sigma0(a) and %r10,%r15 # a&c add %r12,%r11 # d+=T1 and %r9,%r14 # (a|c)&b add %r12,%rdx # h+=T1 or %r15,%r14 # 
Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rdx # h+=Maj(a,b,c) mov 8*13(%rsi),%r12 bswap %r12 mov %r11,%r13 mov %r11,%r14 mov %rax,%r15 ror $14,%r13 ror $18,%r14 xor %rbx,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r11,%r15 # (f^g)&e mov %r12,104(%rsp) xor %r14,%r13 # Sigma1(e) xor %rbx,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rcx,%r12 # T1+=h mov %rdx,%rcx add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rdx,%r13 mov %rdx,%r14 ror $28,%rcx ror $34,%r13 mov %rdx,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rcx ror $5,%r13 or %r9,%r14 # a|c xor %r13,%rcx # h=Sigma0(a) and %r9,%r15 # a&c add %r12,%r10 # d+=T1 and %r8,%r14 # (a|c)&b add %r12,%rcx # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rcx # h+=Maj(a,b,c) mov 8*14(%rsi),%r12 bswap %r12 mov %r10,%r13 mov %r10,%r14 mov %r11,%r15 ror $14,%r13 ror $18,%r14 xor %rax,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r10,%r15 # (f^g)&e mov %r12,112(%rsp) xor %r14,%r13 # Sigma1(e) xor %rax,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rbx,%r12 # T1+=h mov %rcx,%rbx add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rcx,%r13 mov %rcx,%r14 ror $28,%rbx ror $34,%r13 mov %rcx,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rbx ror $5,%r13 or %r8,%r14 # a|c xor %r13,%rbx # h=Sigma0(a) and %r8,%r15 # a&c add %r12,%r9 # d+=T1 and %rdx,%r14 # (a|c)&b add %r12,%rbx # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rbx # h+=Maj(a,b,c) mov 8*15(%rsi),%r12 bswap %r12 mov %r9,%r13 mov %r9,%r14 mov %r10,%r15 ror $14,%r13 ror $18,%r14 xor %r11,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r9,%r15 # (f^g)&e mov %r12,120(%rsp) xor %r14,%r13 # Sigma1(e) xor %r11,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rax,%r12 # T1+=h mov %rbx,%rax add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rbx,%r13 mov %rbx,%r14 ror $28,%rax ror $34,%r13 mov %rbx,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rax ror $5,%r13 or %rdx,%r14 # a|c xor %r13,%rax # h=Sigma0(a) and %rdx,%r15 # a&c add %r12,%r8 # d+=T1 and %rcx,%r14 # (a|c)&b add %r12,%rax # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rax # h+=Maj(a,b,c) jmp .Lrounds_16_xx .balign 16 .Lrounds_16_xx: mov 8(%rsp),%r13 mov 112(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 72(%rsp),%r12 add 0(%rsp),%r12 mov %r8,%r13 mov %r8,%r14 mov %r9,%r15 ror $14,%r13 ror $18,%r14 xor %r10,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r8,%r15 # (f^g)&e mov %r12,0(%rsp) xor %r14,%r13 # Sigma1(e) xor %r10,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r11,%r12 # T1+=h mov %rax,%r11 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rax,%r13 mov %rax,%r14 ror $28,%r11 ror $34,%r13 mov %rax,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r11 ror $5,%r13 or %rcx,%r14 # a|c xor %r13,%r11 # h=Sigma0(a) and %rcx,%r15 # a&c add %r12,%rdx # d+=T1 and %rbx,%r14 # (a|c)&b add %r12,%r11 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r11 # h+=Maj(a,b,c) mov 16(%rsp),%r13 mov 120(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 80(%rsp),%r12 add 8(%rsp),%r12 mov %rdx,%r13 mov %rdx,%r14 
mov %r8,%r15 ror $14,%r13 ror $18,%r14 xor %r9,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rdx,%r15 # (f^g)&e mov %r12,8(%rsp) xor %r14,%r13 # Sigma1(e) xor %r9,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r10,%r12 # T1+=h mov %r11,%r10 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r11,%r13 mov %r11,%r14 ror $28,%r10 ror $34,%r13 mov %r11,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r10 ror $5,%r13 or %rbx,%r14 # a|c xor %r13,%r10 # h=Sigma0(a) and %rbx,%r15 # a&c add %r12,%rcx # d+=T1 and %rax,%r14 # (a|c)&b add %r12,%r10 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r10 # h+=Maj(a,b,c) mov 24(%rsp),%r13 mov 0(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 88(%rsp),%r12 add 16(%rsp),%r12 mov %rcx,%r13 mov %rcx,%r14 mov %rdx,%r15 ror $14,%r13 ror $18,%r14 xor %r8,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rcx,%r15 # (f^g)&e mov %r12,16(%rsp) xor %r14,%r13 # Sigma1(e) xor %r8,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r9,%r12 # T1+=h mov %r10,%r9 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r10,%r13 mov %r10,%r14 ror $28,%r9 ror $34,%r13 mov %r10,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r9 ror $5,%r13 or %rax,%r14 # a|c xor %r13,%r9 # h=Sigma0(a) and %rax,%r15 # a&c add %r12,%rbx # d+=T1 and %r11,%r14 # (a|c)&b add %r12,%r9 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r9 # h+=Maj(a,b,c) mov 32(%rsp),%r13 mov 8(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 96(%rsp),%r12 add 24(%rsp),%r12 mov %rbx,%r13 mov %rbx,%r14 mov %rcx,%r15 ror $14,%r13 ror $18,%r14 xor %rdx,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rbx,%r15 # (f^g)&e mov %r12,24(%rsp) xor %r14,%r13 # Sigma1(e) xor %rdx,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r8,%r12 # T1+=h mov %r9,%r8 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r9,%r13 mov %r9,%r14 ror $28,%r8 ror $34,%r13 mov %r9,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r8 ror $5,%r13 or %r11,%r14 # a|c xor %r13,%r8 # h=Sigma0(a) and %r11,%r15 # a&c add %r12,%rax # d+=T1 and %r10,%r14 # (a|c)&b add %r12,%r8 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r8 # h+=Maj(a,b,c) mov 40(%rsp),%r13 mov 16(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 104(%rsp),%r12 add 32(%rsp),%r12 mov %rax,%r13 mov %rax,%r14 mov %rbx,%r15 ror $14,%r13 ror $18,%r14 xor %rcx,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rax,%r15 # (f^g)&e mov %r12,32(%rsp) xor %r14,%r13 # Sigma1(e) xor %rcx,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rdx,%r12 # T1+=h mov %r8,%rdx add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r8,%r13 mov %r8,%r14 ror $28,%rdx ror $34,%r13 mov %r8,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rdx ror $5,%r13 or %r10,%r14 # a|c xor %r13,%rdx # h=Sigma0(a) and %r10,%r15 # a&c add %r12,%r11 # d+=T1 and %r9,%r14 # (a|c)&b add %r12,%rdx # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rdx # h+=Maj(a,b,c) mov 
48(%rsp),%r13 mov 24(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 112(%rsp),%r12 add 40(%rsp),%r12 mov %r11,%r13 mov %r11,%r14 mov %rax,%r15 ror $14,%r13 ror $18,%r14 xor %rbx,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r11,%r15 # (f^g)&e mov %r12,40(%rsp) xor %r14,%r13 # Sigma1(e) xor %rbx,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rcx,%r12 # T1+=h mov %rdx,%rcx add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rdx,%r13 mov %rdx,%r14 ror $28,%rcx ror $34,%r13 mov %rdx,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rcx ror $5,%r13 or %r9,%r14 # a|c xor %r13,%rcx # h=Sigma0(a) and %r9,%r15 # a&c add %r12,%r10 # d+=T1 and %r8,%r14 # (a|c)&b add %r12,%rcx # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rcx # h+=Maj(a,b,c) mov 56(%rsp),%r13 mov 32(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 120(%rsp),%r12 add 48(%rsp),%r12 mov %r10,%r13 mov %r10,%r14 mov %r11,%r15 ror $14,%r13 ror $18,%r14 xor %rax,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r10,%r15 # (f^g)&e mov %r12,48(%rsp) xor %r14,%r13 # Sigma1(e) xor %rax,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rbx,%r12 # T1+=h mov %rcx,%rbx add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rcx,%r13 mov %rcx,%r14 ror $28,%rbx ror $34,%r13 mov %rcx,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rbx ror $5,%r13 or %r8,%r14 # a|c xor %r13,%rbx # h=Sigma0(a) and %r8,%r15 # a&c add %r12,%r9 # d+=T1 and %rdx,%r14 # (a|c)&b add %r12,%rbx # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rbx # h+=Maj(a,b,c) mov 64(%rsp),%r13 mov 40(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 0(%rsp),%r12 add 56(%rsp),%r12 mov %r9,%r13 mov %r9,%r14 mov %r10,%r15 ror $14,%r13 ror $18,%r14 xor %r11,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r9,%r15 # (f^g)&e mov %r12,56(%rsp) xor %r14,%r13 # Sigma1(e) xor %r11,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rax,%r12 # T1+=h mov %rbx,%rax add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rbx,%r13 mov %rbx,%r14 ror $28,%rax ror $34,%r13 mov %rbx,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rax ror $5,%r13 or %rdx,%r14 # a|c xor %r13,%rax # h=Sigma0(a) and %rdx,%r15 # a&c add %r12,%r8 # d+=T1 and %rcx,%r14 # (a|c)&b add %r12,%rax # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rax # h+=Maj(a,b,c) mov 72(%rsp),%r13 mov 48(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 8(%rsp),%r12 add 64(%rsp),%r12 mov %r8,%r13 mov %r8,%r14 mov %r9,%r15 ror $14,%r13 ror $18,%r14 xor %r10,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r8,%r15 # (f^g)&e mov %r12,64(%rsp) xor %r14,%r13 # Sigma1(e) xor %r10,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r11,%r12 # T1+=h mov %rax,%r11 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rax,%r13 mov %rax,%r14 ror $28,%r11 
ror $34,%r13 mov %rax,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r11 ror $5,%r13 or %rcx,%r14 # a|c xor %r13,%r11 # h=Sigma0(a) and %rcx,%r15 # a&c add %r12,%rdx # d+=T1 and %rbx,%r14 # (a|c)&b add %r12,%r11 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r11 # h+=Maj(a,b,c) mov 80(%rsp),%r13 mov 56(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 16(%rsp),%r12 add 72(%rsp),%r12 mov %rdx,%r13 mov %rdx,%r14 mov %r8,%r15 ror $14,%r13 ror $18,%r14 xor %r9,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rdx,%r15 # (f^g)&e mov %r12,72(%rsp) xor %r14,%r13 # Sigma1(e) xor %r9,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r10,%r12 # T1+=h mov %r11,%r10 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r11,%r13 mov %r11,%r14 ror $28,%r10 ror $34,%r13 mov %r11,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r10 ror $5,%r13 or %rbx,%r14 # a|c xor %r13,%r10 # h=Sigma0(a) and %rbx,%r15 # a&c add %r12,%rcx # d+=T1 and %rax,%r14 # (a|c)&b add %r12,%r10 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r10 # h+=Maj(a,b,c) mov 88(%rsp),%r13 mov 64(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 24(%rsp),%r12 add 80(%rsp),%r12 mov %rcx,%r13 mov %rcx,%r14 mov %rdx,%r15 ror $14,%r13 ror $18,%r14 xor %r8,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rcx,%r15 # (f^g)&e mov %r12,80(%rsp) xor %r14,%r13 # Sigma1(e) xor %r8,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r9,%r12 # T1+=h mov %r10,%r9 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r10,%r13 mov %r10,%r14 ror $28,%r9 ror $34,%r13 mov %r10,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r9 ror $5,%r13 or %rax,%r14 # a|c xor %r13,%r9 # h=Sigma0(a) and %rax,%r15 # a&c add %r12,%rbx # d+=T1 and %r11,%r14 # (a|c)&b add %r12,%r9 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r9 # h+=Maj(a,b,c) mov 96(%rsp),%r13 mov 72(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 32(%rsp),%r12 add 88(%rsp),%r12 mov %rbx,%r13 mov %rbx,%r14 mov %rcx,%r15 ror $14,%r13 ror $18,%r14 xor %rdx,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rbx,%r15 # (f^g)&e mov %r12,88(%rsp) xor %r14,%r13 # Sigma1(e) xor %rdx,%r15 # Ch(e,f,g)=((f^g)&e)^g add %r8,%r12 # T1+=h mov %r9,%r8 add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r9,%r13 mov %r9,%r14 ror $28,%r8 ror $34,%r13 mov %r9,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%r8 ror $5,%r13 or %r11,%r14 # a|c xor %r13,%r8 # h=Sigma0(a) and %r11,%r15 # a&c add %r12,%rax # d+=T1 and %r10,%r14 # (a|c)&b add %r12,%r8 # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%r8 # h+=Maj(a,b,c) mov 104(%rsp),%r13 mov 80(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 40(%rsp),%r12 add 96(%rsp),%r12 mov %rax,%r13 mov %rax,%r14 mov 
%rbx,%r15 ror $14,%r13 ror $18,%r14 xor %rcx,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %rax,%r15 # (f^g)&e mov %r12,96(%rsp) xor %r14,%r13 # Sigma1(e) xor %rcx,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rdx,%r12 # T1+=h mov %r8,%rdx add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %r8,%r13 mov %r8,%r14 ror $28,%rdx ror $34,%r13 mov %r8,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rdx ror $5,%r13 or %r10,%r14 # a|c xor %r13,%rdx # h=Sigma0(a) and %r10,%r15 # a&c add %r12,%r11 # d+=T1 and %r9,%r14 # (a|c)&b add %r12,%rdx # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rdx # h+=Maj(a,b,c) mov 112(%rsp),%r13 mov 88(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 48(%rsp),%r12 add 104(%rsp),%r12 mov %r11,%r13 mov %r11,%r14 mov %rax,%r15 ror $14,%r13 ror $18,%r14 xor %rbx,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r11,%r15 # (f^g)&e mov %r12,104(%rsp) xor %r14,%r13 # Sigma1(e) xor %rbx,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rcx,%r12 # T1+=h mov %rdx,%rcx add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rdx,%r13 mov %rdx,%r14 ror $28,%rcx ror $34,%r13 mov %rdx,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rcx ror $5,%r13 or %r9,%r14 # a|c xor %r13,%rcx # h=Sigma0(a) and %r9,%r15 # a&c add %r12,%r10 # d+=T1 and %r8,%r14 # (a|c)&b add %r12,%rcx # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rcx # h+=Maj(a,b,c) mov 120(%rsp),%r13 mov 96(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 56(%rsp),%r12 add 112(%rsp),%r12 mov %r10,%r13 mov %r10,%r14 mov %r11,%r15 ror $14,%r13 ror $18,%r14 xor %rax,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r10,%r15 # (f^g)&e mov %r12,112(%rsp) xor %r14,%r13 # Sigma1(e) xor %rax,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rbx,%r12 # T1+=h mov %rcx,%rbx add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rcx,%r13 mov %rcx,%r14 ror $28,%rbx ror $34,%r13 mov %rcx,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rbx ror $5,%r13 or %r8,%r14 # a|c xor %r13,%rbx # h=Sigma0(a) and %r8,%r15 # a&c add %r12,%r9 # d+=T1 and %rdx,%r14 # (a|c)&b add %r12,%rbx # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rbx # h+=Maj(a,b,c) mov 0(%rsp),%r13 mov 104(%rsp),%r12 mov %r13,%r15 shr $7,%r13 ror $1,%r15 xor %r15,%r13 ror $7,%r15 xor %r15,%r13 # sigma0(X[(i+1)&0xf]) mov %r12,%r14 shr $6,%r12 ror $19,%r14 xor %r14,%r12 ror $42,%r14 xor %r14,%r12 # sigma1(X[(i+14)&0xf]) add %r13,%r12 add 64(%rsp),%r12 add 120(%rsp),%r12 mov %r9,%r13 mov %r9,%r14 mov %r10,%r15 ror $14,%r13 ror $18,%r14 xor %r11,%r15 # f^g xor %r14,%r13 ror $23,%r14 and %r9,%r15 # (f^g)&e mov %r12,120(%rsp) xor %r14,%r13 # Sigma1(e) xor %r11,%r15 # Ch(e,f,g)=((f^g)&e)^g add %rax,%r12 # T1+=h mov %rbx,%rax add %r13,%r12 # T1+=Sigma1(e) add %r15,%r12 # T1+=Ch(e,f,g) mov %rbx,%r13 mov %rbx,%r14 ror $28,%rax ror $34,%r13 mov %rbx,%r15 add (%rbp,%rdi,8),%r12 # T1+=K[round] xor %r13,%rax ror $5,%r13 or %rdx,%r14 # a|c xor %r13,%rax # h=Sigma0(a) and %rdx,%r15 # a&c add %r12,%r8 # d+=T1 and %rcx,%r14 # (a|c)&b add %r12,%rax # h+=T1 or %r15,%r14 # Maj(a,b,c)=((a|c)&b)|(a&c) lea 1(%rdi),%rdi # round++ add %r14,%rax # 
h+=Maj(a,b,c) cmp $80,%rdi jb .Lrounds_16_xx mov 16*8+0*8(%rsp),%rdi lea 16*8(%rsi),%rsi add 8*0(%rdi),%rax add 8*1(%rdi),%rbx add 8*2(%rdi),%rcx add 8*3(%rdi),%rdx add 8*4(%rdi),%r8 add 8*5(%rdi),%r9 add 8*6(%rdi),%r10 add 8*7(%rdi),%r11 cmp 16*8+2*8(%rsp),%rsi mov %rax,8*0(%rdi) mov %rbx,8*1(%rdi) mov %rcx,8*2(%rdi) mov %rdx,8*3(%rdi) mov %r8,8*4(%rdi) mov %r9,8*5(%rdi) mov %r10,8*6(%rdi) mov %r11,8*7(%rdi) jb .Lloop mov 16*8+3*8(%rsp),%rsp .cfi_def_cfa %rsp,56 pop %r15 .cfi_adjust_cfa_offset -8 .cfi_restore %r15 pop %r14 .cfi_adjust_cfa_offset -8 .cfi_restore %r14 pop %r13 .cfi_adjust_cfa_offset -8 .cfi_restore %r13 pop %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 pop %rbp .cfi_adjust_cfa_offset -8 .cfi_restore %rbp pop %rbx .cfi_adjust_cfa_offset -8 .cfi_restore %rbx RET .cfi_endproc SET_SIZE(SHA512TransformBlocks) -.section .rodata +SECTION_STATIC .balign 64 SET_OBJ(K512) K512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 #endif /* !lint && !__lint */ #if defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif -
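Taken together, the SHA-512 code above processes one 128-byte block per pass of .Lloop: rounds 0..15 load and byte-swap the message words, rounds 16..79 update a 16-entry circular schedule with sigma0/sigma1, and the epilogue adds the working variables back into the context and advances the input pointer until it reaches the precomputed end pointer. The C sketch below restates that structure for reference; it is not part of the patch, K512 stands for the constant table above, __builtin_bswap64 stands in for the bswap instructions, and the remaining names (rotr64, sigma0_512, sigma1_512, sha512_round, sha512_blocks) are illustrative.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

extern const uint64_t K512[80];         /* the .quad table above */

static inline uint64_t rotr64(uint64_t x, unsigned n)
{
        return ((x >> n) | (x << (64 - n)));
}

/* "# sigma0(X[(i+1)&0xf])": shr $7 with net rotates by 1 and 8 */
static inline uint64_t sigma0_512(uint64_t x)
{
        return (rotr64(x, 1) ^ rotr64(x, 8) ^ (x >> 7));
}

/* "# sigma1(X[(i+14)&0xf])": shr $6 with net rotates by 19 and 61 */
static inline uint64_t sigma1_512(uint64_t x)
{
        return (rotr64(x, 19) ^ rotr64(x, 61) ^ (x >> 6));
}

/* Same round shape as the 32-bit sketch earlier, with 64-bit rotate counts. */
static void
sha512_round(uint64_t s[8], uint64_t k, uint64_t w)
{
        uint64_t a = s[0], e = s[4];
        uint64_t S1 = rotr64(e, 14) ^ rotr64(e, 18) ^ rotr64(e, 41);  /* Sigma1(e) */
        uint64_t S0 = rotr64(a, 28) ^ rotr64(a, 34) ^ rotr64(a, 39);  /* Sigma0(a) */
        uint64_t ch = ((s[5] ^ s[6]) & e) ^ s[6];                     /* Ch(e,f,g) */
        uint64_t maj = ((a | s[2]) & s[1]) | (a & s[2]);              /* Maj(a,b,c) */
        uint64_t t1 = s[7] + S1 + ch + k + w;                         /* T1 */

        s[7] = s[6]; s[6] = s[5]; s[5] = e;
        s[4] = s[3] + t1;                       /* d+=T1 */
        s[3] = s[2]; s[2] = s[1]; s[1] = a;
        s[0] = t1 + S0 + maj;                   /* h=Sigma0(a)+T1+Maj(a,b,c) */
}

static void
sha512_blocks(uint64_t state[8], const uint8_t *inp, size_t num)
{
        const uint8_t *end = inp + num * 128;   /* "inp+num*16*8" end pointer */

        while (inp < end) {                     /* "jb .Lloop" */
                uint64_t W[16], s[8], m;
                int i;

                memcpy(s, state, sizeof (s));
                for (i = 0; i < 16; i++) {      /* rounds 0..15: load and bswap */
                        memcpy(&m, inp + 8 * i, sizeof (m));
                        W[i] = __builtin_bswap64(m);
                        sha512_round(s, K512[i], W[i]);
                }
                for (; i < 80; i++) {           /* ".Lrounds_16_xx" until "cmp $80,%rdi" */
                        W[i & 0xf] += sigma0_512(W[(i + 1) & 0xf]) +
                            sigma1_512(W[(i + 14) & 0xf]) + W[(i + 9) & 0xf];
                        sha512_round(s, K512[i], W[i & 0xf]);
                }
                for (i = 0; i < 8; i++)         /* "add 8*n(%rdi),%r.." */
                        state[i] += s[i];
                inp += 128;                     /* "lea 16*8(%rsi),%rsi" */
        }
}

The assembly keeps the 16-word schedule on the stack at 0(%rsp)..120(%rsp) and the eight working variables in registers, which is what the repeated mov %r12,N(%rsp) stores and the cmp $80,%rdi round counter above correspond to.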