diff --git a/module/zcommon/zfs_fletcher_aarch64_neon.c b/module/zcommon/zfs_fletcher_aarch64_neon.c
index 26f2115c44bf..b6ec3da6158d 100644
--- a/module/zcommon/zfs_fletcher_aarch64_neon.c
+++ b/module/zcommon/zfs_fletcher_aarch64_neon.c
@@ -1,208 +1,209 @@
+// SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0-only
/*
 * Implement fast Fletcher4 with NEON instructions. (aarch64)
 *
 * Use the 128-bit NEON SIMD instructions and registers to compute
 * Fletcher4 in two incremental 64-bit parallel accumulator streams,
 * and then combine the streams to form the final four checksum words.
 * This implementation is a derivative of the AVX SIMD implementation by
 * James Guilford and Jinshan Xiong from Intel (see zfs_fletcher_intel.c).
 *
 * Copyright (C) 2016 Romain Dolbeau.
 *
 * Authors:
 *	Romain Dolbeau
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if defined(__aarch64__)

#include
#include
#include
#include

static void
fletcher_4_aarch64_neon_init(fletcher_4_ctx_t *ctx)
{
        memset(ctx->aarch64_neon, 0, 4 * sizeof (zfs_fletcher_aarch64_neon_t));
}

static void
fletcher_4_aarch64_neon_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
{
        uint64_t A, B, C, D;

        A = ctx->aarch64_neon[0].v[0] + ctx->aarch64_neon[0].v[1];
        B = 2 * ctx->aarch64_neon[1].v[0] + 2 * ctx->aarch64_neon[1].v[1] -
            ctx->aarch64_neon[0].v[1];
        C = 4 * ctx->aarch64_neon[2].v[0] - ctx->aarch64_neon[1].v[0] +
            4 * ctx->aarch64_neon[2].v[1] - 3 * ctx->aarch64_neon[1].v[1];
        D = 8 * ctx->aarch64_neon[3].v[0] - 4 * ctx->aarch64_neon[2].v[0] +
            8 * ctx->aarch64_neon[3].v[1] - 8 * ctx->aarch64_neon[2].v[1] +
            ctx->aarch64_neon[1].v[1];
        ZIO_SET_CHECKSUM(zcp, A, B, C, D);
}

#define NEON_INIT_LOOP() \
        asm("eor %[ZERO].16b,%[ZERO].16b,%[ZERO].16b\n" \
        "ld1 { %[ACC0].4s }, %[CTX0]\n" \
        "ld1 { %[ACC1].4s }, %[CTX1]\n" \
        "ld1 { %[ACC2].4s }, %[CTX2]\n" \
        "ld1 { %[ACC3].4s }, %[CTX3]\n" \
        : [ZERO] "=w" (ZERO), \
        [ACC0] "=w" (ACC0), [ACC1] "=w" (ACC1), \
        [ACC2] "=w" (ACC2), [ACC3] "=w" (ACC3) \
        : [CTX0] "Q" (ctx->aarch64_neon[0]), \
        [CTX1] "Q" (ctx->aarch64_neon[1]), \
        [CTX2] "Q" (ctx->aarch64_neon[2]), \
        [CTX3] "Q" (ctx->aarch64_neon[3]))

#define NEON_DO_REVERSE "rev32 %[SRC].16b, %[SRC].16b\n"

#define NEON_DONT_REVERSE ""

#define NEON_MAIN_LOOP(REVERSE) \
        asm("ld1 { %[SRC].4s }, %[IP]\n" \
        REVERSE \
        "zip1 %[TMP1].4s, %[SRC].4s, %[ZERO].4s\n" \
        "zip2 %[TMP2].4s, %[SRC].4s, %[ZERO].4s\n" \
        "add %[ACC0].2d, %[ACC0].2d, %[TMP1].2d\n" \
        "add %[ACC1].2d, %[ACC1].2d, %[ACC0].2d\n" \
        "add %[ACC2].2d, %[ACC2].2d, %[ACC1].2d\n" \
        "add %[ACC3].2d, %[ACC3].2d, %[ACC2].2d\n" \
        "add %[ACC0].2d, %[ACC0].2d, %[TMP2].2d\n" \
        "add %[ACC1].2d, %[ACC1].2d, %[ACC0].2d\n" \
        "add %[ACC2].2d, %[ACC2].2d, %[ACC1].2d\n" \
        "add %[ACC3].2d, %[ACC3].2d, %[ACC2].2d\n" \
        : [SRC] "=&w" (SRC), \
        [TMP1] "=&w" (TMP1), [TMP2] "=&w" (TMP2), \
        [ACC0] "+w" (ACC0), [ACC1] "+w" (ACC1), \
        [ACC2] "+w" (ACC2), [ACC3] "+w" (ACC3) \
        : [ZERO] "w" (ZERO), [IP] "Q" (*ip))

#define NEON_FINI_LOOP() \
        asm("st1 { %[ACC0].4s },%[DST0]\n" \
        "st1 { %[ACC1].4s },%[DST1]\n" \
        "st1 { %[ACC2].4s },%[DST2]\n" \
        "st1 { %[ACC3].4s },%[DST3]\n" \
        : [DST0] "=Q" (ctx->aarch64_neon[0]), \
        [DST1] "=Q" (ctx->aarch64_neon[1]), \
        [DST2] "=Q" (ctx->aarch64_neon[2]), \
        [DST3] "=Q" (ctx->aarch64_neon[3]) \
        : [ACC0] "w" (ACC0), [ACC1] "w" (ACC1), \
        [ACC2] "w" (ACC2), [ACC3] "w" (ACC3))

static void
fletcher_4_aarch64_neon_native(fletcher_4_ctx_t *ctx,
    const void *buf, uint64_t size)
{
        const uint64_t *ip = buf;
        const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
#if defined(_KERNEL)
        register unsigned char ZERO asm("v0") __attribute__((vector_size(16)));
        register unsigned char ACC0 asm("v1") __attribute__((vector_size(16)));
        register unsigned char ACC1 asm("v2") __attribute__((vector_size(16)));
        register unsigned char ACC2 asm("v3") __attribute__((vector_size(16)));
        register unsigned char ACC3 asm("v4") __attribute__((vector_size(16)));
        register unsigned char TMP1 asm("v5") __attribute__((vector_size(16)));
        register unsigned char TMP2 asm("v6") __attribute__((vector_size(16)));
        register unsigned char SRC asm("v7") __attribute__((vector_size(16)));
#else
        unsigned char ZERO __attribute__((vector_size(16)));
        unsigned char ACC0 __attribute__((vector_size(16)));
        unsigned char ACC1 __attribute__((vector_size(16)));
        unsigned char ACC2 __attribute__((vector_size(16)));
        unsigned char ACC3 __attribute__((vector_size(16)));
        unsigned char TMP1 __attribute__((vector_size(16)));
        unsigned char TMP2 __attribute__((vector_size(16)));
        unsigned char SRC __attribute__((vector_size(16)));
#endif

        NEON_INIT_LOOP();

        do {
                NEON_MAIN_LOOP(NEON_DONT_REVERSE);
        } while ((ip += 2) < ipend);

        NEON_FINI_LOOP();
}

static void
fletcher_4_aarch64_neon_byteswap(fletcher_4_ctx_t *ctx,
    const void *buf, uint64_t size)
{
        const uint64_t *ip = buf;
        const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
#if defined(_KERNEL)
        register unsigned char ZERO asm("v0") __attribute__((vector_size(16)));
        register unsigned char ACC0 asm("v1") __attribute__((vector_size(16)));
        register unsigned char ACC1 asm("v2") __attribute__((vector_size(16)));
        register unsigned char ACC2 asm("v3") __attribute__((vector_size(16)));
        register unsigned char ACC3 asm("v4") __attribute__((vector_size(16)));
        register unsigned char TMP1 asm("v5") __attribute__((vector_size(16)));
        register unsigned char TMP2 asm("v6") __attribute__((vector_size(16)));
        register unsigned char SRC asm("v7") __attribute__((vector_size(16)));
#else
        unsigned char ZERO __attribute__((vector_size(16)));
        unsigned char ACC0 __attribute__((vector_size(16)));
        unsigned char ACC1 __attribute__((vector_size(16)));
        unsigned char ACC2 __attribute__((vector_size(16)));
        unsigned char ACC3 __attribute__((vector_size(16)));
        unsigned char TMP1 __attribute__((vector_size(16)));
        unsigned char TMP2 __attribute__((vector_size(16)));
        unsigned char SRC __attribute__((vector_size(16)));
#endif

        NEON_INIT_LOOP();

        do {
                NEON_MAIN_LOOP(NEON_DO_REVERSE);
        } while ((ip += 2) < ipend);

        NEON_FINI_LOOP();
}

static boolean_t
fletcher_4_aarch64_neon_valid(void)
{
        return (kfpu_allowed());
}

const fletcher_4_ops_t fletcher_4_aarch64_neon_ops = {
        .init_native = fletcher_4_aarch64_neon_init,
        .compute_native = fletcher_4_aarch64_neon_native,
        .fini_native = fletcher_4_aarch64_neon_fini,
        .init_byteswap = fletcher_4_aarch64_neon_init,
        .compute_byteswap = fletcher_4_aarch64_neon_byteswap,
        .fini_byteswap = fletcher_4_aarch64_neon_fini,
        .valid = fletcher_4_aarch64_neon_valid,
        .uses_fpu = B_TRUE,
        .name = "aarch64_neon"
};

#endif /* defined(__aarch64__) */
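Illustrative note (not part of the patch): the NEON loop keeps two Fletcher-4 streams side by side, lane 0 taking the even 32-bit words and lane 1 the odd words, and fletcher_4_aarch64_neon_fini() folds the lanes back into the serial result with the coefficients shown above. A minimal, standalone C sketch of that decomposition is below; fletcher4_ref and fletcher4_2stream are hypothetical names used only for this demonstration, and the combination coefficients are copied verbatim from the fini function.

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

/* Reference: the usual serial Fletcher-4 recurrence over 32-bit words. */
static void fletcher4_ref(const uint32_t *w, size_t n, uint64_t out[4])
{
        uint64_t a = 0, b = 0, c = 0, d = 0;
        for (size_t i = 0; i < n; i++) {
                a += w[i]; b += a; c += b; d += c;
        }
        out[0] = a; out[1] = b; out[2] = c; out[3] = d;
}

/*
 * Two interleaved streams (stream 0 = even words, stream 1 = odd words),
 * folded with the same coefficients as fletcher_4_aarch64_neon_fini().
 * n must be even, matching the SIMD loops that consume two words per lane.
 */
static void fletcher4_2stream(const uint32_t *w, size_t n, uint64_t out[4])
{
        uint64_t a[2] = {0}, b[2] = {0}, c[2] = {0}, d[2] = {0};
        for (size_t i = 0; i < n; i += 2) {
                for (int s = 0; s < 2; s++) {
                        a[s] += w[i + s]; b[s] += a[s];
                        c[s] += b[s]; d[s] += c[s];
                }
        }
        out[0] = a[0] + a[1];
        out[1] = 2 * b[0] + 2 * b[1] - a[1];
        out[2] = 4 * c[0] - b[0] + 4 * c[1] - 3 * b[1];
        out[3] = 8 * d[0] - 4 * c[0] + 8 * d[1] - 8 * c[1] + b[1];
}

int main(void)
{
        uint32_t w[64];
        uint64_t r[4], s[4];

        for (int i = 0; i < 64; i++)
                w[i] = (uint32_t)(2654435761u * (uint32_t)(i + 1));
        fletcher4_ref(w, 64, r);
        fletcher4_2stream(w, 64, s);
        for (int i = 0; i < 4; i++)
                assert(r[i] == s[i]);   /* streams combine to the serial result */
        return (0);
}

All arithmetic is modulo 2^64, so the intermediate subtractions wrap exactly as they do in the kernel code.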
diff --git a/module/zcommon/zfs_fletcher_intel.c b/module/zcommon/zfs_fletcher_intel.c
index 34590a15572d..570076ba2b09 100644
--- a/module/zcommon/zfs_fletcher_intel.c
+++ b/module/zcommon/zfs_fletcher_intel.c
@@ -1,166 +1,167 @@
+// SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0-only
/*
 * Implement fast Fletcher4 with AVX2 instructions. (x86_64)
 *
 * Use the 256-bit AVX2 SIMD instructions and registers to compute
 * Fletcher4 in four incremental 64-bit parallel accumulator streams,
 * and then combine the streams to form the final four checksum words.
 *
 * Copyright (C) 2015 Intel Corporation.
 *
 * Authors:
 *	James Guilford
 *	Jinshan Xiong
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if defined(HAVE_AVX) && defined(HAVE_AVX2)

#include
#include
#include
#include

static void
fletcher_4_avx2_init(fletcher_4_ctx_t *ctx)
{
        memset(ctx->avx, 0, 4 * sizeof (zfs_fletcher_avx_t));
}

static void
fletcher_4_avx2_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
{
        uint64_t A, B, C, D;

        A = ctx->avx[0].v[0] + ctx->avx[0].v[1] +
            ctx->avx[0].v[2] + ctx->avx[0].v[3];
        B = 0 - ctx->avx[0].v[1] - 2 * ctx->avx[0].v[2] -
            3 * ctx->avx[0].v[3] + 4 * ctx->avx[1].v[0] +
            4 * ctx->avx[1].v[1] + 4 * ctx->avx[1].v[2] +
            4 * ctx->avx[1].v[3];
        C = ctx->avx[0].v[2] + 3 * ctx->avx[0].v[3] -
            6 * ctx->avx[1].v[0] - 10 * ctx->avx[1].v[1] -
            14 * ctx->avx[1].v[2] - 18 * ctx->avx[1].v[3] +
            16 * ctx->avx[2].v[0] + 16 * ctx->avx[2].v[1] +
            16 * ctx->avx[2].v[2] + 16 * ctx->avx[2].v[3];
        D = 0 - ctx->avx[0].v[3] + 4 * ctx->avx[1].v[0] +
            10 * ctx->avx[1].v[1] + 20 * ctx->avx[1].v[2] +
            34 * ctx->avx[1].v[3] - 48 * ctx->avx[2].v[0] -
            64 * ctx->avx[2].v[1] - 80 * ctx->avx[2].v[2] -
            96 * ctx->avx[2].v[3] + 64 * ctx->avx[3].v[0] +
            64 * ctx->avx[3].v[1] + 64 * ctx->avx[3].v[2] +
            64 * ctx->avx[3].v[3];

        ZIO_SET_CHECKSUM(zcp, A, B, C, D);
}

#define FLETCHER_4_AVX2_RESTORE_CTX(ctx) \
{ \
        asm volatile("vmovdqu %0, %%ymm0" :: "m" ((ctx)->avx[0])); \
        asm volatile("vmovdqu %0, %%ymm1" :: "m" ((ctx)->avx[1])); \
        asm volatile("vmovdqu %0, %%ymm2" :: "m" ((ctx)->avx[2])); \
        asm volatile("vmovdqu %0, %%ymm3" :: "m" ((ctx)->avx[3])); \
}

#define FLETCHER_4_AVX2_SAVE_CTX(ctx) \
{ \
        asm volatile("vmovdqu %%ymm0, %0" : "=m" ((ctx)->avx[0])); \
        asm volatile("vmovdqu %%ymm1, %0" : "=m" ((ctx)->avx[1])); \
        asm volatile("vmovdqu %%ymm2, %0" : "=m" ((ctx)->avx[2])); \
        asm volatile("vmovdqu %%ymm3, %0" : "=m" ((ctx)->avx[3])); \
}

static void
fletcher_4_avx2_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
        const uint64_t *ip = buf;
        const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);

        FLETCHER_4_AVX2_RESTORE_CTX(ctx);

        do {
                asm volatile("vpmovzxdq %0, %%ymm4"::"m" (*ip));
                asm volatile("vpaddq %ymm4, %ymm0, %ymm0");
                asm volatile("vpaddq %ymm0, %ymm1, %ymm1");
                asm volatile("vpaddq %ymm1, %ymm2, %ymm2");
                asm volatile("vpaddq %ymm2, %ymm3, %ymm3");
        } while ((ip += 2) < ipend);

        FLETCHER_4_AVX2_SAVE_CTX(ctx);
        asm volatile("vzeroupper");
}

static void
fletcher_4_avx2_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
        static const zfs_fletcher_avx_t mask = {
                .v = { 0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B,
                    0xFFFFFFFF00010203, 0xFFFFFFFF08090A0B }
        };
        const uint64_t *ip = buf;
        const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);

        FLETCHER_4_AVX2_RESTORE_CTX(ctx);

        asm volatile("vmovdqu %0, %%ymm5" :: "m" (mask));

        do {
                asm volatile("vpmovzxdq %0, %%ymm4"::"m" (*ip));
                asm volatile("vpshufb %ymm5, %ymm4, %ymm4");
                asm volatile("vpaddq %ymm4, %ymm0, %ymm0");
                asm volatile("vpaddq %ymm0, %ymm1, %ymm1");
                asm volatile("vpaddq %ymm1, %ymm2, %ymm2");
                asm volatile("vpaddq %ymm2, %ymm3, %ymm3");
        } while ((ip += 2) < ipend);

        FLETCHER_4_AVX2_SAVE_CTX(ctx);
        asm volatile("vzeroupper");
}

static boolean_t
fletcher_4_avx2_valid(void)
{
        return (kfpu_allowed() && zfs_avx_available() && zfs_avx2_available());
}

const fletcher_4_ops_t fletcher_4_avx2_ops = {
        .init_native = fletcher_4_avx2_init,
        .fini_native = fletcher_4_avx2_fini,
        .compute_native = fletcher_4_avx2_native,
        .init_byteswap = fletcher_4_avx2_init,
        .fini_byteswap = fletcher_4_avx2_fini,
        .compute_byteswap = fletcher_4_avx2_byteswap,
        .valid = fletcher_4_avx2_valid,
        .uses_fpu = B_TRUE,
        .name = "avx2"
};

#endif /* defined(HAVE_AVX) && defined(HAVE_AVX2) */
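Illustrative note (not part of the patch): the AVX2 path runs four Fletcher-4 streams, one per 64-bit ymm lane, with lane s accumulating the 32-bit words at indices congruent to s modulo 4; fletcher_4_avx2_fini() then folds the four lanes with the larger coefficient set shown above. The standalone sketch below mirrors that folding in plain C; fletcher4_ref and fletcher4_4stream are hypothetical names, and the coefficients are copied from the fini function.

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

static void fletcher4_ref(const uint32_t *w, size_t n, uint64_t out[4])
{
        uint64_t a = 0, b = 0, c = 0, d = 0;
        for (size_t i = 0; i < n; i++) {
                a += w[i]; b += a; c += b; d += c;
        }
        out[0] = a; out[1] = b; out[2] = c; out[3] = d;
}

/*
 * Four interleaved streams (stream s takes words i with i % 4 == s),
 * folded with the coefficients used by fletcher_4_avx2_fini().
 * n must be a multiple of 4, matching the 16-byte stride of the AVX2 loop.
 */
static void fletcher4_4stream(const uint32_t *w, size_t n, uint64_t out[4])
{
        uint64_t a[4] = {0}, b[4] = {0}, c[4] = {0}, d[4] = {0};
        for (size_t i = 0; i < n; i += 4) {
                for (int s = 0; s < 4; s++) {
                        a[s] += w[i + s]; b[s] += a[s];
                        c[s] += b[s]; d[s] += c[s];
                }
        }
        out[0] = a[0] + a[1] + a[2] + a[3];
        out[1] = 0 - a[1] - 2 * a[2] - 3 * a[3] +
            4 * (b[0] + b[1] + b[2] + b[3]);
        out[2] = a[2] + 3 * a[3] -
            6 * b[0] - 10 * b[1] - 14 * b[2] - 18 * b[3] +
            16 * (c[0] + c[1] + c[2] + c[3]);
        out[3] = 0 - a[3] +
            4 * b[0] + 10 * b[1] + 20 * b[2] + 34 * b[3] -
            48 * c[0] - 64 * c[1] - 80 * c[2] - 96 * c[3] +
            64 * (d[0] + d[1] + d[2] + d[3]);
}

int main(void)
{
        uint32_t w[64];
        uint64_t r[4], s4[4];

        for (int i = 0; i < 64; i++)
                w[i] = (uint32_t)(0x85EBCA6Bu * (uint32_t)(i + 3));
        fletcher4_ref(w, 64, r);
        fletcher4_4stream(w, 64, s4);
        for (int i = 0; i < 4; i++)
                assert(r[i] == s4[i]);
        return (0);
}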
diff --git a/module/zcommon/zfs_fletcher_sse.c b/module/zcommon/zfs_fletcher_sse.c
index 8ab9b9acb83b..96aaf1622d73 100644
--- a/module/zcommon/zfs_fletcher_sse.c
+++ b/module/zcommon/zfs_fletcher_sse.c
@@ -1,222 +1,223 @@
+// SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0-only
/*
 * Implement fast Fletcher4 with SSE2,SSSE3 instructions. (x86)
 *
 * Use the 128-bit SSE2/SSSE3 SIMD instructions and registers to compute
 * Fletcher4 in two incremental 64-bit parallel accumulator streams,
 * and then combine the streams to form the final four checksum words.
 * This implementation is a derivative of the AVX SIMD implementation by
 * James Guilford and Jinshan Xiong from Intel (see zfs_fletcher_intel.c).
 *
 * Copyright (C) 2016 Tyler J. Stachecki.
 *
 * Authors:
 *	Tyler J. Stachecki
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if defined(HAVE_SSE2)

#include
#include
#include
#include
#include

static void
fletcher_4_sse2_init(fletcher_4_ctx_t *ctx)
{
        memset(ctx->sse, 0, 4 * sizeof (zfs_fletcher_sse_t));
}

static void
fletcher_4_sse2_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
{
        uint64_t A, B, C, D;

        /*
         * The mixing matrix for checksum calculation is:
         * a = a0 + a1
         * b = 2b0 + 2b1 - a1
         * c = 4c0 - b0 + 4c1 -3b1
         * d = 8d0 - 4c0 + 8d1 - 8c1 + b1;
         *
         * c and d are multiplied by 4 and 8, respectively,
         * before spilling the vectors out to memory.
         */
        A = ctx->sse[0].v[0] + ctx->sse[0].v[1];
        B = 2 * ctx->sse[1].v[0] + 2 * ctx->sse[1].v[1] - ctx->sse[0].v[1];
        C = 4 * ctx->sse[2].v[0] - ctx->sse[1].v[0] + 4 * ctx->sse[2].v[1] -
            3 * ctx->sse[1].v[1];
        D = 8 * ctx->sse[3].v[0] - 4 * ctx->sse[2].v[0] + 8 * ctx->sse[3].v[1] -
            8 * ctx->sse[2].v[1] + ctx->sse[1].v[1];
        ZIO_SET_CHECKSUM(zcp, A, B, C, D);
}

#define FLETCHER_4_SSE_RESTORE_CTX(ctx) \
{ \
        asm volatile("movdqu %0, %%xmm0" :: "m" ((ctx)->sse[0])); \
        asm volatile("movdqu %0, %%xmm1" :: "m" ((ctx)->sse[1])); \
        asm volatile("movdqu %0, %%xmm2" :: "m" ((ctx)->sse[2])); \
        asm volatile("movdqu %0, %%xmm3" :: "m" ((ctx)->sse[3])); \
}

#define FLETCHER_4_SSE_SAVE_CTX(ctx) \
{ \
        asm volatile("movdqu %%xmm0, %0" : "=m" ((ctx)->sse[0])); \
        asm volatile("movdqu %%xmm1, %0" : "=m" ((ctx)->sse[1])); \
        asm volatile("movdqu %%xmm2, %0" : "=m" ((ctx)->sse[2])); \
        asm volatile("movdqu %%xmm3, %0" : "=m" ((ctx)->sse[3])); \
}

static void
fletcher_4_sse2_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
        const uint64_t *ip = buf;
        const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);

        FLETCHER_4_SSE_RESTORE_CTX(ctx);

        asm volatile("pxor %xmm4, %xmm4");

        do {
                asm volatile("movdqu %0, %%xmm5" :: "m"(*ip));
                asm volatile("movdqa %xmm5, %xmm6");
                asm volatile("punpckldq %xmm4, %xmm5");
                asm volatile("punpckhdq %xmm4, %xmm6");
                asm volatile("paddq %xmm5, %xmm0");
                asm volatile("paddq %xmm0, %xmm1");
                asm volatile("paddq %xmm1, %xmm2");
                asm volatile("paddq %xmm2, %xmm3");
                asm volatile("paddq %xmm6, %xmm0");
                asm volatile("paddq %xmm0, %xmm1");
                asm volatile("paddq %xmm1, %xmm2");
                asm volatile("paddq %xmm2, %xmm3");
        } while ((ip += 2) < ipend);

        FLETCHER_4_SSE_SAVE_CTX(ctx);
}

static void
fletcher_4_sse2_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
        const uint32_t *ip = buf;
        const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);

        FLETCHER_4_SSE_RESTORE_CTX(ctx);

        do {
                uint32_t scratch1 = BSWAP_32(ip[0]);
                uint32_t scratch2 = BSWAP_32(ip[1]);
                asm volatile("movd %0, %%xmm5" :: "r"(scratch1));
                asm volatile("movd %0, %%xmm6" :: "r"(scratch2));
                asm volatile("punpcklqdq %xmm6, %xmm5");
                asm volatile("paddq %xmm5, %xmm0");
                asm volatile("paddq %xmm0, %xmm1");
                asm volatile("paddq %xmm1, %xmm2");
                asm volatile("paddq %xmm2, %xmm3");
        } while ((ip += 2) < ipend);

        FLETCHER_4_SSE_SAVE_CTX(ctx);
}

static boolean_t
fletcher_4_sse2_valid(void)
{
        return (kfpu_allowed() && zfs_sse2_available());
}

const fletcher_4_ops_t fletcher_4_sse2_ops = {
        .init_native = fletcher_4_sse2_init,
        .fini_native = fletcher_4_sse2_fini,
        .compute_native = fletcher_4_sse2_native,
        .init_byteswap = fletcher_4_sse2_init,
        .fini_byteswap = fletcher_4_sse2_fini,
        .compute_byteswap = fletcher_4_sse2_byteswap,
        .valid = fletcher_4_sse2_valid,
        .uses_fpu = B_TRUE,
        .name = "sse2"
};

#endif /* defined(HAVE_SSE2) */

#if defined(HAVE_SSE2) && defined(HAVE_SSSE3)

static void
fletcher_4_ssse3_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
        static const zfs_fletcher_sse_t mask = {
                .v = { 0x0405060700010203, 0x0C0D0E0F08090A0B }
        };
        const uint64_t *ip = buf;
        const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);

        FLETCHER_4_SSE_RESTORE_CTX(ctx);

        asm volatile("movdqu %0, %%xmm7"::"m" (mask));
        asm volatile("pxor %xmm4, %xmm4");

        do {
                asm volatile("movdqu %0, %%xmm5"::"m" (*ip));
                asm volatile("pshufb %xmm7, %xmm5");
                asm volatile("movdqa %xmm5, %xmm6");
                asm volatile("punpckldq %xmm4, %xmm5");
                asm volatile("punpckhdq %xmm4, %xmm6");
                asm volatile("paddq %xmm5, %xmm0");
                asm volatile("paddq %xmm0, %xmm1");
                asm volatile("paddq %xmm1, %xmm2");
                asm volatile("paddq %xmm2, %xmm3");
                asm volatile("paddq %xmm6, %xmm0");
                asm volatile("paddq %xmm0, %xmm1");
                asm volatile("paddq %xmm1, %xmm2");
                asm volatile("paddq %xmm2, %xmm3");
        } while ((ip += 2) < ipend);

        FLETCHER_4_SSE_SAVE_CTX(ctx);
}

static boolean_t
fletcher_4_ssse3_valid(void)
{
        return (kfpu_allowed() && zfs_sse2_available() && zfs_ssse3_available());
}

const fletcher_4_ops_t fletcher_4_ssse3_ops = {
        .init_native = fletcher_4_sse2_init,
        .fini_native = fletcher_4_sse2_fini,
        .compute_native = fletcher_4_sse2_native,
        .init_byteswap = fletcher_4_sse2_init,
        .fini_byteswap = fletcher_4_sse2_fini,
        .compute_byteswap = fletcher_4_ssse3_byteswap,
        .valid = fletcher_4_ssse3_valid,
        .uses_fpu = B_TRUE,
        .name = "ssse3"
};

#endif /* defined(HAVE_SSE2) && defined(HAVE_SSSE3) */
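Illustrative note (not part of the patch): the SSSE3 byteswap path swaps byte order with a single pshufb, using the mask constant above to reverse the bytes within each 32-bit word in place (the SSE2 fallback does the same job with BSWAP_32 on scalars). The standalone sketch below emulates pshufb with that exact mask and checks the per-dword swap on a little-endian host; pshufb16 and bswap32 are hypothetical helper names for this demonstration only.

#include <stdint.h>
#include <string.h>
#include <assert.h>

/* emulate pshufb: dst[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 0x0f] */
static void pshufb16(uint8_t *dst, const uint8_t *src, const uint8_t *mask)
{
        for (int i = 0; i < 16; i++)
                dst[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 0x0f];
}

static uint32_t bswap32(uint32_t x)
{
        return ((x >> 24) | ((x >> 8) & 0xff00) |
            ((x << 8) & 0xff0000) | (x << 24));
}

int main(void)
{
        /* the same constant fletcher_4_ssse3_byteswap() loads into %xmm7 */
        const uint64_t mask64[2] = {
                0x0405060700010203ULL, 0x0C0D0E0F08090A0BULL };
        uint8_t mask[16], src[16], dst[16];
        uint32_t in[4] = { 0x01020304, 0xA1B2C3D4, 0x00000001, 0xFFFF0000 };

        memcpy(mask, mask64, 16);       /* little-endian byte layout, as on x86 */
        memcpy(src, in, 16);
        pshufb16(dst, src, mask);

        for (int i = 0; i < 4; i++) {
                uint32_t out;
                memcpy(&out, dst + 4 * i, 4);
                assert(out == bswap32(in[i]));  /* per-dword byte swap */
        }
        return (0);
}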
diff --git a/module/zcommon/zfs_fletcher_superscalar.c b/module/zcommon/zfs_fletcher_superscalar.c
index 2a80816ff3ec..e0faa995e80e 100644
--- a/module/zcommon/zfs_fletcher_superscalar.c
+++ b/module/zcommon/zfs_fletcher_superscalar.c
@@ -1,164 +1,165 @@
+// SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0-only
/*
 * Implement fast Fletcher4 using superscalar pipelines.
 *
 * Use regular C code to compute
 * Fletcher4 in two incremental 64-bit parallel accumulator streams,
 * and then combine the streams to form the final four checksum words.
 * This implementation is a derivative of the AVX SIMD implementation by
 * James Guilford and Jinshan Xiong from Intel (see zfs_fletcher_intel.c).
 *
 * Copyright (C) 2016 Romain Dolbeau.
 *
 * Authors:
 *	Romain Dolbeau
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include
#include
#include
#include
#include

static void
fletcher_4_superscalar_init(fletcher_4_ctx_t *ctx)
{
        memset(ctx->superscalar, 0, 4 * sizeof (zfs_fletcher_superscalar_t));
}

static void
fletcher_4_superscalar_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
{
        uint64_t A, B, C, D;

        A = ctx->superscalar[0].v[0] + ctx->superscalar[0].v[1];
        B = 2 * ctx->superscalar[1].v[0] + 2 * ctx->superscalar[1].v[1] -
            ctx->superscalar[0].v[1];
        C = 4 * ctx->superscalar[2].v[0] - ctx->superscalar[1].v[0] +
            4 * ctx->superscalar[2].v[1] - 3 * ctx->superscalar[1].v[1];
        D = 8 * ctx->superscalar[3].v[0] - 4 * ctx->superscalar[2].v[0] +
            8 * ctx->superscalar[3].v[1] - 8 * ctx->superscalar[2].v[1] +
            ctx->superscalar[1].v[1];
        ZIO_SET_CHECKSUM(zcp, A, B, C, D);
}

static void
fletcher_4_superscalar_native(fletcher_4_ctx_t *ctx,
    const void *buf, uint64_t size)
{
        const uint32_t *ip = buf;
        const uint32_t *ipend = ip + (size / sizeof (uint32_t));
        uint64_t a, b, c, d;
        uint64_t a2, b2, c2, d2;

        a = ctx->superscalar[0].v[0];
        b = ctx->superscalar[1].v[0];
        c = ctx->superscalar[2].v[0];
        d = ctx->superscalar[3].v[0];
        a2 = ctx->superscalar[0].v[1];
        b2 = ctx->superscalar[1].v[1];
        c2 = ctx->superscalar[2].v[1];
        d2 = ctx->superscalar[3].v[1];

        do {
                a += ip[0];
                a2 += ip[1];
                b += a;
                b2 += a2;
                c += b;
                c2 += b2;
                d += c;
                d2 += c2;
        } while ((ip += 2) < ipend);

        ctx->superscalar[0].v[0] = a;
        ctx->superscalar[1].v[0] = b;
        ctx->superscalar[2].v[0] = c;
        ctx->superscalar[3].v[0] = d;
        ctx->superscalar[0].v[1] = a2;
        ctx->superscalar[1].v[1] = b2;
        ctx->superscalar[2].v[1] = c2;
        ctx->superscalar[3].v[1] = d2;
}

static void
fletcher_4_superscalar_byteswap(fletcher_4_ctx_t *ctx,
    const void *buf, uint64_t size)
{
        const uint32_t *ip = buf;
        const uint32_t *ipend = ip + (size / sizeof (uint32_t));
        uint64_t a, b, c, d;
        uint64_t a2, b2, c2, d2;

        a = ctx->superscalar[0].v[0];
        b = ctx->superscalar[1].v[0];
        c = ctx->superscalar[2].v[0];
        d = ctx->superscalar[3].v[0];
        a2 = ctx->superscalar[0].v[1];
        b2 = ctx->superscalar[1].v[1];
        c2 = ctx->superscalar[2].v[1];
        d2 = ctx->superscalar[3].v[1];

        do {
                a += BSWAP_32(ip[0]);
                a2 += BSWAP_32(ip[1]);
                b += a;
                b2 += a2;
                c += b;
                c2 += b2;
                d += c;
                d2 += c2;
        } while ((ip += 2) < ipend);

        ctx->superscalar[0].v[0] = a;
        ctx->superscalar[1].v[0] = b;
        ctx->superscalar[2].v[0] = c;
        ctx->superscalar[3].v[0] = d;
        ctx->superscalar[0].v[1] = a2;
        ctx->superscalar[1].v[1] = b2;
        ctx->superscalar[2].v[1] = c2;
        ctx->superscalar[3].v[1] = d2;
}

static boolean_t
fletcher_4_superscalar_valid(void)
{
        return (B_TRUE);
}

const fletcher_4_ops_t fletcher_4_superscalar_ops = {
        .init_native = fletcher_4_superscalar_init,
        .compute_native = fletcher_4_superscalar_native,
        .fini_native = fletcher_4_superscalar_fini,
        .init_byteswap = fletcher_4_superscalar_init,
        .compute_byteswap = fletcher_4_superscalar_byteswap,
        .fini_byteswap = fletcher_4_superscalar_fini,
        .valid = fletcher_4_superscalar_valid,
        .uses_fpu = B_FALSE,
        .name = "superscalar"
};
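Illustrative note (not part of the patch): each implementation above exposes the same fletcher_4_ops_t interface, so callers drive them identically: init, one or more compute calls (the accumulators are restored from and spilled back to the context each time), then fini to fold the streams into a zio_cksum_t. The fragment below only sketches that calling sequence; checksum_with is a hypothetical helper, fletcher_4_ctx_t and zio_cksum_t are ZFS-internal types, and it is not compilable outside the ZFS tree.

/* hypothetical driver showing the init -> compute -> fini sequence */
static void
checksum_with(const fletcher_4_ops_t *ops, const void *buf, uint64_t size,
    zio_cksum_t *zcp)
{
        fletcher_4_ctx_t ctx;

        if (!ops->valid())
                return;         /* caller would fall back to another impl */
        /*
         * For implementations with uses_fpu set, in-kernel callers are
         * presumably expected to bracket these calls with
         * kfpu_begin()/kfpu_end().
         */
        ops->init_native(&ctx);
        ops->compute_native(&ctx, buf, size);   /* may be called repeatedly */
        ops->fini_native(&ctx, zcp);
}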
diff --git a/module/zcommon/zfs_fletcher_superscalar4.c b/module/zcommon/zfs_fletcher_superscalar4.c
index 0b52bb63d003..1cec1851ef3f 100644
--- a/module/zcommon/zfs_fletcher_superscalar4.c
+++ b/module/zcommon/zfs_fletcher_superscalar4.c
@@ -1,230 +1,231 @@
+// SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0-only
/*
 * Implement fast Fletcher4 using superscalar pipelines.
 *
 * Use regular C code to compute
 * Fletcher4 in four incremental 64-bit parallel accumulator streams,
 * and then combine the streams to form the final four checksum words.
 * This implementation is a derivative of the AVX SIMD implementation by
 * James Guilford and Jinshan Xiong from Intel (see zfs_fletcher_intel.c).
 *
 * Copyright (C) 2016 Romain Dolbeau.
 *
 * Authors:
 *	Romain Dolbeau
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include
#include
#include
#include
#include

static void
fletcher_4_superscalar4_init(fletcher_4_ctx_t *ctx)
{
        memset(ctx->superscalar, 0, 4 * sizeof (zfs_fletcher_superscalar_t));
}

static void
fletcher_4_superscalar4_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
{
        uint64_t A, B, C, D;

        A = ctx->superscalar[0].v[0] + ctx->superscalar[0].v[1] +
            ctx->superscalar[0].v[2] + ctx->superscalar[0].v[3];
        B = 0 - ctx->superscalar[0].v[1] - 2 * ctx->superscalar[0].v[2] -
            3 * ctx->superscalar[0].v[3] + 4 * ctx->superscalar[1].v[0] +
            4 * ctx->superscalar[1].v[1] + 4 * ctx->superscalar[1].v[2] +
            4 * ctx->superscalar[1].v[3];
        C = ctx->superscalar[0].v[2] + 3 * ctx->superscalar[0].v[3] -
            6 * ctx->superscalar[1].v[0] - 10 * ctx->superscalar[1].v[1] -
            14 * ctx->superscalar[1].v[2] - 18 * ctx->superscalar[1].v[3] +
            16 * ctx->superscalar[2].v[0] + 16 * ctx->superscalar[2].v[1] +
            16 * ctx->superscalar[2].v[2] + 16 * ctx->superscalar[2].v[3];
        D = 0 - ctx->superscalar[0].v[3] + 4 * ctx->superscalar[1].v[0] +
            10 * ctx->superscalar[1].v[1] + 20 * ctx->superscalar[1].v[2] +
            34 * ctx->superscalar[1].v[3] - 48 * ctx->superscalar[2].v[0] -
            64 * ctx->superscalar[2].v[1] - 80 * ctx->superscalar[2].v[2] -
            96 * ctx->superscalar[2].v[3] + 64 * ctx->superscalar[3].v[0] +
            64 * ctx->superscalar[3].v[1] + 64 * ctx->superscalar[3].v[2] +
            64 * ctx->superscalar[3].v[3];
        ZIO_SET_CHECKSUM(zcp, A, B, C, D);
}

static void
fletcher_4_superscalar4_native(fletcher_4_ctx_t *ctx,
    const void *buf, uint64_t size)
{
        const uint32_t *ip = buf;
        const uint32_t *ipend = ip + (size / sizeof (uint32_t));
        uint64_t a, b, c, d;
        uint64_t a2, b2, c2, d2;
        uint64_t a3, b3, c3, d3;
        uint64_t a4, b4, c4, d4;

        a = ctx->superscalar[0].v[0];
        b = ctx->superscalar[1].v[0];
        c = ctx->superscalar[2].v[0];
        d = ctx->superscalar[3].v[0];
        a2 = ctx->superscalar[0].v[1];
        b2 = ctx->superscalar[1].v[1];
        c2 = ctx->superscalar[2].v[1];
        d2 = ctx->superscalar[3].v[1];
        a3 = ctx->superscalar[0].v[2];
        b3 = ctx->superscalar[1].v[2];
        c3 = ctx->superscalar[2].v[2];
        d3 = ctx->superscalar[3].v[2];
        a4 = ctx->superscalar[0].v[3];
        b4 = ctx->superscalar[1].v[3];
        c4 = ctx->superscalar[2].v[3];
        d4 = ctx->superscalar[3].v[3];

        do {
                a += ip[0];
                a2 += ip[1];
                a3 += ip[2];
                a4 += ip[3];
                b += a;
                b2 += a2;
                b3 += a3;
                b4 += a4;
                c += b;
                c2 += b2;
                c3 += b3;
                c4 += b4;
                d += c;
                d2 += c2;
                d3 += c3;
                d4 += c4;
        } while ((ip += 4) < ipend);

        ctx->superscalar[0].v[0] = a;
        ctx->superscalar[1].v[0] = b;
        ctx->superscalar[2].v[0] = c;
        ctx->superscalar[3].v[0] = d;
        ctx->superscalar[0].v[1] = a2;
        ctx->superscalar[1].v[1] = b2;
        ctx->superscalar[2].v[1] = c2;
        ctx->superscalar[3].v[1] = d2;
        ctx->superscalar[0].v[2] = a3;
        ctx->superscalar[1].v[2] = b3;
        ctx->superscalar[2].v[2] = c3;
        ctx->superscalar[3].v[2] = d3;
        ctx->superscalar[0].v[3] = a4;
        ctx->superscalar[1].v[3] = b4;
        ctx->superscalar[2].v[3] = c4;
        ctx->superscalar[3].v[3] = d4;
}

static void
fletcher_4_superscalar4_byteswap(fletcher_4_ctx_t *ctx,
    const void *buf, uint64_t size)
{
        const uint32_t *ip = buf;
        const uint32_t *ipend = ip + (size / sizeof (uint32_t));
        uint64_t a, b, c, d;
        uint64_t a2, b2, c2, d2;
        uint64_t a3, b3, c3, d3;
        uint64_t a4, b4, c4, d4;

        a = ctx->superscalar[0].v[0];
        b = ctx->superscalar[1].v[0];
        c = ctx->superscalar[2].v[0];
        d = ctx->superscalar[3].v[0];
        a2 = ctx->superscalar[0].v[1];
        b2 = ctx->superscalar[1].v[1];
        c2 = ctx->superscalar[2].v[1];
        d2 = ctx->superscalar[3].v[1];
        a3 = ctx->superscalar[0].v[2];
        b3 = ctx->superscalar[1].v[2];
        c3 = ctx->superscalar[2].v[2];
        d3 = ctx->superscalar[3].v[2];
        a4 = ctx->superscalar[0].v[3];
        b4 = ctx->superscalar[1].v[3];
        c4 = ctx->superscalar[2].v[3];
        d4 = ctx->superscalar[3].v[3];

        do {
                a += BSWAP_32(ip[0]);
                a2 += BSWAP_32(ip[1]);
                a3 += BSWAP_32(ip[2]);
                a4 += BSWAP_32(ip[3]);
                b += a;
                b2 += a2;
                b3 += a3;
                b4 += a4;
                c += b;
                c2 += b2;
                c3 += b3;
                c4 += b4;
                d += c;
                d2 += c2;
                d3 += c3;
                d4 += c4;
        } while ((ip += 4) < ipend);

        ctx->superscalar[0].v[0] = a;
        ctx->superscalar[1].v[0] = b;
        ctx->superscalar[2].v[0] = c;
        ctx->superscalar[3].v[0] = d;
        ctx->superscalar[0].v[1] = a2;
        ctx->superscalar[1].v[1] = b2;
        ctx->superscalar[2].v[1] = c2;
        ctx->superscalar[3].v[1] = d2;
        ctx->superscalar[0].v[2] = a3;
        ctx->superscalar[1].v[2] = b3;
        ctx->superscalar[2].v[2] = c3;
        ctx->superscalar[3].v[2] = d3;
        ctx->superscalar[0].v[3] = a4;
        ctx->superscalar[1].v[3] = b4;
        ctx->superscalar[2].v[3] = c4;
        ctx->superscalar[3].v[3] = d4;
}

static boolean_t
fletcher_4_superscalar4_valid(void)
{
        return (B_TRUE);
}

const fletcher_4_ops_t fletcher_4_superscalar4_ops = {
        .init_native = fletcher_4_superscalar4_init,
        .compute_native = fletcher_4_superscalar4_native,
        .fini_native = fletcher_4_superscalar4_fini,
        .init_byteswap = fletcher_4_superscalar4_init,
        .compute_byteswap = fletcher_4_superscalar4_byteswap,
        .fini_byteswap = fletcher_4_superscalar4_fini,
        .valid = fletcher_4_superscalar4_valid,
        .uses_fpu = B_FALSE,
        .name = "superscalar4"
};
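Illustrative note (not part of the patch): every byteswap variant above differs from its native counterpart only in applying a 32-bit byte swap (BSWAP_32, rev32, or a pshufb mask) to each word before feeding the same accumulator recurrence, so checksumming a foreign-endian buffer through the byteswap path yields the same result as checksumming the native-order data. The standalone sketch below demonstrates that equivalence with a simplified serial model; fletcher4 and bswap32 are hypothetical names for this demonstration only.

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

static uint32_t bswap32(uint32_t x)
{
        return ((x >> 24) | ((x >> 8) & 0xff00) |
            ((x << 8) & 0xff0000) | (x << 24));
}

/* serial Fletcher-4; 'swap' mimics the compute_byteswap variants */
static void fletcher4(const uint32_t *w, size_t n, int swap, uint64_t out[4])
{
        uint64_t a = 0, b = 0, c = 0, d = 0;
        for (size_t i = 0; i < n; i++) {
                a += swap ? bswap32(w[i]) : w[i];
                b += a; c += b; d += c;
        }
        out[0] = a; out[1] = b; out[2] = c; out[3] = d;
}

int main(void)
{
        uint32_t native[32], swapped[32];
        uint64_t r1[4], r2[4];

        for (int i = 0; i < 32; i++) {
                native[i] = (uint32_t)(0x9E3779B9u * (uint32_t)(i + 7));
                swapped[i] = bswap32(native[i]);        /* foreign-endian copy */
        }
        fletcher4(native, 32, 0, r1);   /* compute_native path */
        fletcher4(swapped, 32, 1, r2);  /* compute_byteswap path */
        for (int i = 0; i < 4; i++)
                assert(r1[i] == r2[i]);
        return (0);
}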