]>
Commit | Line | Data |
---|---|---|
35a76a03 TS |
1 | /* |
2 | * Implement fast Fletcher4 with SSE2,SSSE3 instructions. (x86) | |
3 | * | |
4 | * Use the 128-bit SSE2/SSSE3 SIMD instructions and registers to compute | |
7f319493 | 5 | * Fletcher4 in two incremental 64-bit parallel accumulator streams, |
35a76a03 TS |
6 | * and then combine the streams to form the final four checksum words. |
7 | * This implementation is a derivative of the AVX SIMD implementation by | |
8 | * James Guilford and Jinshan Xiong from Intel (see zfs_fletcher_intel.c). | |
9 | * | |
10 | * Copyright (C) 2016 Tyler J. Stachecki. | |
11 | * | |
12 | * Authors: | |
13 | * Tyler J. Stachecki <stachecki.tyler@gmail.com> | |
14 | * | |
15 | * This software is available to you under a choice of one of two | |
16 | * licenses. You may choose to be licensed under the terms of the GNU | |
17 | * General Public License (GPL) Version 2, available from the file | |
18 | * COPYING in the main directory of this source tree, or the | |
19 | * OpenIB.org BSD license below: | |
20 | * | |
21 | * Redistribution and use in source and binary forms, with or | |
22 | * without modification, are permitted provided that the following | |
23 | * conditions are met: | |
24 | * | |
25 | * - Redistributions of source code must retain the above | |
26 | * copyright notice, this list of conditions and the following | |
27 | * disclaimer. | |
28 | * | |
29 | * - Redistributions in binary form must reproduce the above | |
30 | * copyright notice, this list of conditions and the following | |
31 | * disclaimer in the documentation and/or other materials | |
32 | * provided with the distribution. | |
33 | * | |
34 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
35 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
36 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
37 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
38 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
39 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
40 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
41 | * SOFTWARE. | |
42 | */ | |
43 | ||
44 | #if defined(HAVE_SSE2) | |
45 | ||
006e9a40 | 46 | #include <sys/simd.h> |
35a76a03 | 47 | #include <sys/spa_checksum.h> |
d465fc58 | 48 | #include <sys/string.h> |
5bf703b8 | 49 | #include <sys/byteorder.h> |
35a76a03 | 50 | #include <zfs_fletcher.h> |
35a76a03 TS |
51 | |
52 | static void | |
4ea3f864 GM |
53 | fletcher_4_sse2_init(fletcher_4_ctx_t *ctx) |
54 | { | |
861166b0 | 55 | memset(ctx->sse, 0, 4 * sizeof (zfs_fletcher_sse_t)); |
35a76a03 TS |
56 | } |
57 | ||
58 | static void | |
4ea3f864 GM |
59 | fletcher_4_sse2_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp) |
60 | { | |
35a76a03 TS |
61 | uint64_t A, B, C, D; |
62 | ||
35a76a03 TS |
63 | /* |
64 | * The mixing matrix for checksum calculation is: | |
65 | * a = a0 + a1 | |
66 | * b = 2b0 + 2b1 - a1 | |
67 | * c = 4c0 - b0 + 4c1 -3b1 | |
68 | * d = 8d0 - 4c0 + 8d1 - 8c1 + b1; | |
69 | * | |
70 | * c and d are multiplied by 4 and 8, respectively, | |
71 | * before spilling the vectors out to memory. | |
72 | */ | |
5bf703b8 GN |
73 | A = ctx->sse[0].v[0] + ctx->sse[0].v[1]; |
74 | B = 2 * ctx->sse[1].v[0] + 2 * ctx->sse[1].v[1] - ctx->sse[0].v[1]; | |
75 | C = 4 * ctx->sse[2].v[0] - ctx->sse[1].v[0] + 4 * ctx->sse[2].v[1] - | |
76 | 3 * ctx->sse[1].v[1]; | |
77 | D = 8 * ctx->sse[3].v[0] - 4 * ctx->sse[2].v[0] + 8 * ctx->sse[3].v[1] - | |
78 | 8 * ctx->sse[2].v[1] + ctx->sse[1].v[1]; | |
35a76a03 TS |
79 | |
80 | ZIO_SET_CHECKSUM(zcp, A, B, C, D); | |
81 | } | |
82 | ||
5bf703b8 GN |
/*
 * Load the four saved accumulator pairs from the context into
 * %xmm0-%xmm3 before entering a compute loop.  movdqu is the
 * unaligned load form, so the context need not be 16-byte aligned.
 */
#define	FLETCHER_4_SSE_RESTORE_CTX(ctx)					\
{									\
	asm volatile("movdqu %0, %%xmm0" :: "m" ((ctx)->sse[0]));	\
	asm volatile("movdqu %0, %%xmm1" :: "m" ((ctx)->sse[1]));	\
	asm volatile("movdqu %0, %%xmm2" :: "m" ((ctx)->sse[2]));	\
	asm volatile("movdqu %0, %%xmm3" :: "m" ((ctx)->sse[3]));	\
}
90 | ||
/*
 * Spill the accumulators in %xmm0-%xmm3 back into the context after a
 * compute loop, so the computation can be resumed incrementally later.
 */
#define	FLETCHER_4_SSE_SAVE_CTX(ctx)					\
{									\
	asm volatile("movdqu %%xmm0, %0" : "=m" ((ctx)->sse[0]));	\
	asm volatile("movdqu %%xmm1, %0" : "=m" ((ctx)->sse[1]));	\
	asm volatile("movdqu %%xmm2, %0" : "=m" ((ctx)->sse[2]));	\
	asm volatile("movdqu %%xmm3, %0" : "=m" ((ctx)->sse[3]));	\
}
98 | ||
/*
 * Accumulate `size` bytes of native-endian input into the running
 * Fletcher4 accumulators held in ctx.  Two 64-bit lanes per register
 * process two input words in parallel.
 */
static void
fletcher_4_sse2_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
	const uint64_t *ip = buf;
	const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);

	/* Reload the running accumulators (a, b, c, d) into %xmm0-%xmm3. */
	FLETCHER_4_SSE_RESTORE_CTX(ctx);

	/* %xmm4 is kept zero; it supplies high halves for 32->64 widening. */
	asm volatile("pxor %xmm4, %xmm4");

	/*
	 * Each iteration consumes 16 bytes (four 32-bit input words) and
	 * performs two rounds of the Fletcher recurrence, one per pair of
	 * zero-extended words.  NOTE(review): the do/while assumes size is
	 * nonzero and a multiple of 16 bytes -- confirm callers guarantee
	 * this.
	 */
	do {
		/* Load four 32-bit input words. */
		asm volatile("movdqu %0, %%xmm5" :: "m"(*ip));
		asm volatile("movdqa %xmm5, %xmm6");
		/* Zero-extend words 0,1 into %xmm5 and words 2,3 into %xmm6. */
		asm volatile("punpckldq %xmm4, %xmm5");
		asm volatile("punpckhdq %xmm4, %xmm6");
		/* Round 1: a += data; b += a; c += b; d += c. */
		asm volatile("paddq %xmm5, %xmm0");
		asm volatile("paddq %xmm0, %xmm1");
		asm volatile("paddq %xmm1, %xmm2");
		asm volatile("paddq %xmm2, %xmm3");
		/* Round 2: same recurrence with the second pair of words. */
		asm volatile("paddq %xmm6, %xmm0");
		asm volatile("paddq %xmm0, %xmm1");
		asm volatile("paddq %xmm1, %xmm2");
		asm volatile("paddq %xmm2, %xmm3");
	} while ((ip += 2) < ipend);

	FLETCHER_4_SSE_SAVE_CTX(ctx);
}
126 | ||
/*
 * Accumulate `size` bytes of byte-swapped input.  SSE2 lacks a byte
 * shuffle instruction, so each 32-bit word is swapped with scalar
 * BSWAP_32 before being moved into the vector registers.
 */
static void
fletcher_4_sse2_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
	const uint32_t *ip = buf;
	const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);

	FLETCHER_4_SSE_RESTORE_CTX(ctx);

	/*
	 * Each iteration consumes 8 bytes (two 32-bit words) and performs
	 * one round of the Fletcher recurrence across both 64-bit lanes.
	 * NOTE(review): assumes size is nonzero and a multiple of 8 bytes
	 * -- confirm callers guarantee this.
	 */
	do {
		uint32_t scratch1 = BSWAP_32(ip[0]);
		uint32_t scratch2 = BSWAP_32(ip[1]);
		/* movd zero-extends each swapped word into an xmm register. */
		asm volatile("movd %0, %%xmm5" :: "r"(scratch1));
		asm volatile("movd %0, %%xmm6" :: "r"(scratch2));
		/* Pack them as the low/high 64-bit lanes of %xmm5. */
		asm volatile("punpcklqdq %xmm6, %xmm5");
		/* a += data; b += a; c += b; d += c (both lanes at once). */
		asm volatile("paddq %xmm5, %xmm0");
		asm volatile("paddq %xmm0, %xmm1");
		asm volatile("paddq %xmm1, %xmm2");
		asm volatile("paddq %xmm2, %xmm3");
	} while ((ip += 2) < ipend);

	FLETCHER_4_SSE_SAVE_CTX(ctx);
}
149 | ||
150 | static boolean_t fletcher_4_sse2_valid(void) | |
151 | { | |
e5db3134 | 152 | return (kfpu_allowed() && zfs_sse2_available()); |
35a76a03 TS |
153 | } |
154 | ||
/*
 * Dispatch table for the SSE2 implementation.  Native and byteswap
 * streams share the same init/fini routines; only the compute
 * routines differ.
 */
const fletcher_4_ops_t fletcher_4_sse2_ops = {
	.init_native = fletcher_4_sse2_init,
	.fini_native = fletcher_4_sse2_fini,
	.compute_native = fletcher_4_sse2_native,
	.init_byteswap = fletcher_4_sse2_init,
	.fini_byteswap = fletcher_4_sse2_fini,
	.compute_byteswap = fletcher_4_sse2_byteswap,
	.valid = fletcher_4_sse2_valid,
	.uses_fpu = B_TRUE,	/* compute routines clobber %xmm0-%xmm6 */
	.name = "sse2"
};
166 | ||
167 | #endif /* defined(HAVE_SSE2) */ | |
168 | ||
169 | #if defined(HAVE_SSE2) && defined(HAVE_SSSE3) | |
/*
 * Byteswap variant using SSSE3's pshufb: the input is byte-swapped
 * in-register, so the full 16-bytes-per-iteration structure of
 * fletcher_4_sse2_native() can be kept.
 */
static void
fletcher_4_ssse3_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
	/* pshufb control mask: reverses the bytes of each 32-bit word. */
	static const zfs_fletcher_sse_t mask = {
		.v = { 0x0405060700010203, 0x0C0D0E0F08090A0B }
	};

	const uint64_t *ip = buf;
	const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);

	FLETCHER_4_SSE_RESTORE_CTX(ctx);

	/* Keep the shuffle mask resident in %xmm7 for the whole loop. */
	asm volatile("movdqu %0, %%xmm7"::"m" (mask));
	/* %xmm4 stays zero; it supplies high halves for 32->64 widening. */
	asm volatile("pxor %xmm4, %xmm4");

	/*
	 * 16 bytes (four words) per iteration, two Fletcher rounds, as in
	 * the native routine.  NOTE(review): assumes size is nonzero and
	 * a multiple of 16 bytes -- confirm callers guarantee this.
	 */
	do {
		asm volatile("movdqu %0, %%xmm5"::"m" (*ip));
		/* Byte-swap all four 32-bit words at once. */
		asm volatile("pshufb %xmm7, %xmm5");
		asm volatile("movdqa %xmm5, %xmm6");
		/* Zero-extend words 0,1 into %xmm5 and words 2,3 into %xmm6. */
		asm volatile("punpckldq %xmm4, %xmm5");
		asm volatile("punpckhdq %xmm4, %xmm6");
		/* Round 1: a += data; b += a; c += b; d += c. */
		asm volatile("paddq %xmm5, %xmm0");
		asm volatile("paddq %xmm0, %xmm1");
		asm volatile("paddq %xmm1, %xmm2");
		asm volatile("paddq %xmm2, %xmm3");
		/* Round 2: same recurrence with the second pair of words. */
		asm volatile("paddq %xmm6, %xmm0");
		asm volatile("paddq %xmm0, %xmm1");
		asm volatile("paddq %xmm1, %xmm2");
		asm volatile("paddq %xmm2, %xmm3");
	} while ((ip += 2) < ipend);

	FLETCHER_4_SSE_SAVE_CTX(ctx);
}
203 | ||
204 | static boolean_t fletcher_4_ssse3_valid(void) | |
205 | { | |
e5db3134 BB |
206 | return (kfpu_allowed() && zfs_sse2_available() && |
207 | zfs_ssse3_available()); | |
35a76a03 TS |
208 | } |
209 | ||
/*
 * Dispatch table for the SSSE3 implementation.  Only the byteswap
 * compute routine and the validity check differ from the plain SSE2
 * variant; everything else is reused from it.
 */
const fletcher_4_ops_t fletcher_4_ssse3_ops = {
	.init_native = fletcher_4_sse2_init,
	.fini_native = fletcher_4_sse2_fini,
	.compute_native = fletcher_4_sse2_native,
	.init_byteswap = fletcher_4_sse2_init,
	.fini_byteswap = fletcher_4_sse2_fini,
	.compute_byteswap = fletcher_4_ssse3_byteswap,
	.valid = fletcher_4_ssse3_valid,
	.uses_fpu = B_TRUE,	/* compute routines clobber %xmm0-%xmm7 */
	.name = "ssse3"
};
221 | ||
222 | #endif /* defined(HAVE_SSE2) && defined(HAVE_SSSE3) */ |