/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2016 Romain Dolbeau. All rights reserved.
 * Copyright (C) 2016 Gvozden Nešković. All rights reserved.
 */
25 | ||
26 | #include <sys/isa_defs.h> | |
27 | ||
01017962 | 28 | #if defined(__x86_64) && defined(HAVE_AVX512BW) |
7f547f85 RD |
29 | |
30 | #include <sys/types.h> | |
31 | #include <linux/simd_x86.h> | |
32 | ||
33 | #define __asm __asm__ __volatile__ | |
34 | ||
35 | #define _REG_CNT(_0, _1, _2, _3, _4, _5, _6, _7, N, ...) N | |
36 | #define REG_CNT(r...) _REG_CNT(r, 8, 7, 6, 5, 4, 3, 2, 1) | |
37 | ||
38 | #define VR0_(REG, ...) "zmm"#REG | |
39 | #define VR1_(_1, REG, ...) "zmm"#REG | |
40 | #define VR2_(_1, _2, REG, ...) "zmm"#REG | |
41 | #define VR3_(_1, _2, _3, REG, ...) "zmm"#REG | |
42 | #define VR4_(_1, _2, _3, _4, REG, ...) "zmm"#REG | |
43 | #define VR5_(_1, _2, _3, _4, _5, REG, ...) "zmm"#REG | |
44 | #define VR6_(_1, _2, _3, _4, _5, _6, REG, ...) "zmm"#REG | |
45 | #define VR7_(_1, _2, _3, _4, _5, _6, _7, REG, ...) "zmm"#REG | |
46 | ||
47 | #define VR0(r...) VR0_(r) | |
48 | #define VR1(r...) VR1_(r) | |
49 | #define VR2(r...) VR2_(r, 1) | |
50 | #define VR3(r...) VR3_(r, 1, 2) | |
51 | #define VR4(r...) VR4_(r, 1, 2) | |
52 | #define VR5(r...) VR5_(r, 1, 2, 3) | |
53 | #define VR6(r...) VR6_(r, 1, 2, 3, 4) | |
54 | #define VR7(r...) VR7_(r, 1, 2, 3, 4, 5) | |
55 | ||
56 | #define R_01(REG1, REG2, ...) REG1, REG2 | |
57 | #define _R_23(_0, _1, REG2, REG3, ...) REG2, REG3 | |
58 | #define R_23(REG...) _R_23(REG, 1, 2, 3) | |
59 | ||
60 | #define ASM_BUG() ASSERT(0) | |
61 | ||
62 | extern const uint8_t gf_clmul_mod_lt[4*256][16]; | |
63 | ||
64 | #define ELEM_SIZE 64 | |
65 | ||
66 | typedef struct v { | |
67 | uint8_t b[ELEM_SIZE] __attribute__((aligned(ELEM_SIZE))); | |
68 | } v_t; | |
69 | ||
7f547f85 RD |
/*
 * XOR the 128B/256B at `src` into the given 2 or 4 zmm registers
 * (64 bytes per register, consecutive 0x40-byte offsets).
 */
#define	XOR_ACC(src, r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		__asm( \
		    "vpxorq 0x00(%[SRC]), %%" VR0(r)", %%" VR0(r) "\n" \
		    "vpxorq 0x40(%[SRC]), %%" VR1(r)", %%" VR1(r) "\n" \
		    "vpxorq 0x80(%[SRC]), %%" VR2(r)", %%" VR2(r) "\n" \
		    "vpxorq 0xc0(%[SRC]), %%" VR3(r)", %%" VR3(r) "\n" \
		    : : [SRC] "r" (src)); \
		break; \
	case 2: \
		__asm( \
		    "vpxorq 0x00(%[SRC]), %%" VR0(r)", %%" VR0(r) "\n" \
		    "vpxorq 0x40(%[SRC]), %%" VR1(r)", %%" VR1(r) "\n" \
		    : : [SRC] "r" (src)); \
		break; \
	default: \
		ASM_BUG(); \
	} \
}
91 | ||
/*
 * Register-to-register XOR: with 8 registers, XOR the first four into the
 * last four; with 4 registers, XOR the first pair into the second pair.
 */
#define	XOR(r...) \
{ \
	switch (REG_CNT(r)) { \
	case 8: \
		__asm( \
		    "vpxorq %" VR0(r) ", %" VR4(r)", %" VR4(r) "\n" \
		    "vpxorq %" VR1(r) ", %" VR5(r)", %" VR5(r) "\n" \
		    "vpxorq %" VR2(r) ", %" VR6(r)", %" VR6(r) "\n" \
		    "vpxorq %" VR3(r) ", %" VR7(r)", %" VR7(r)); \
		break; \
	case 4: \
		__asm( \
		    "vpxorq %" VR0(r) ", %" VR2(r)", %" VR2(r) "\n" \
		    "vpxorq %" VR1(r) ", %" VR3(r)", %" VR3(r)); \
		break; \
	default: \
		ASM_BUG(); \
	} \
}

/* Zero registers by XOR-ing each one with itself. */
#define	ZERO(r...) XOR(r, r)
7f547f85 | 113 | |
/*
 * Copy registers: with 8 registers, copy the first four into the last
 * four; with 4 registers, copy the first pair into the second pair.
 */
#define	COPY(r...) \
{ \
	switch (REG_CNT(r)) { \
	case 8: \
		__asm( \
		    "vmovdqa64 %" VR0(r) ", %" VR4(r) "\n" \
		    "vmovdqa64 %" VR1(r) ", %" VR5(r) "\n" \
		    "vmovdqa64 %" VR2(r) ", %" VR6(r) "\n" \
		    "vmovdqa64 %" VR3(r) ", %" VR7(r)); \
		break; \
	case 4: \
		__asm( \
		    "vmovdqa64 %" VR0(r) ", %" VR2(r) "\n" \
		    "vmovdqa64 %" VR1(r) ", %" VR3(r)); \
		break; \
	default: \
		ASM_BUG(); \
	} \
}
133 | ||
/*
 * Load 128B/256B from `src` into 2 or 4 zmm registers.  Uses aligned
 * moves (vmovdqa64); `src` must be 64-byte aligned (see v_t).
 */
#define	LOAD(src, r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		__asm( \
		    "vmovdqa64 0x00(%[SRC]), %%" VR0(r) "\n" \
		    "vmovdqa64 0x40(%[SRC]), %%" VR1(r) "\n" \
		    "vmovdqa64 0x80(%[SRC]), %%" VR2(r) "\n" \
		    "vmovdqa64 0xc0(%[SRC]), %%" VR3(r) "\n" \
		    : : [SRC] "r" (src)); \
		break; \
	case 2: \
		__asm( \
		    "vmovdqa64 0x00(%[SRC]), %%" VR0(r) "\n" \
		    "vmovdqa64 0x40(%[SRC]), %%" VR1(r) "\n" \
		    : : [SRC] "r" (src)); \
		break; \
	default: \
		ASM_BUG(); \
	} \
}
155 | ||
/*
 * Store 2 or 4 zmm registers to `dst` (128B/256B).  Uses aligned moves;
 * `dst` must be 64-byte aligned (see v_t).
 */
#define	STORE(dst, r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		__asm( \
		    "vmovdqa64 %%" VR0(r) ", 0x00(%[DST])\n" \
		    "vmovdqa64 %%" VR1(r) ", 0x40(%[DST])\n" \
		    "vmovdqa64 %%" VR2(r) ", 0x80(%[DST])\n" \
		    "vmovdqa64 %%" VR3(r) ", 0xc0(%[DST])\n" \
		    : : [DST] "r" (dst)); \
		break; \
	case 2: \
		__asm( \
		    "vmovdqa64 %%" VR0(r) ", 0x00(%[DST])\n" \
		    "vmovdqa64 %%" VR1(r) ", 0x40(%[DST])\n" \
		    : : [DST] "r" (dst)); \
		break; \
	default: \
		ASM_BUG(); \
	} \
}
177 | ||
/*
 * Prepare constants used by _MUL2: zmm22 holds the byte 0x1d broadcast to
 * all lanes (presumably the GF(2^8) reduction constant — confirm against
 * the other raidz math implementations) and zmm23 holds all zeroes, used
 * as the comparison reference for detecting bytes with the high bit set.
 */
#define	MUL2_SETUP() \
{ \
	__asm("vmovq %0,   %%xmm22" :: "r"(0x1d1d1d1d1d1d1d1d)); \
	__asm("vpbroadcastq %xmm22, %zmm22"); \
	__asm("vpxord %zmm23, %zmm23 ,%zmm23"); \
}
184 | ||
/*
 * Multiply each byte of a register pair by 2 in GF(2^8): vpcmpb($1 =
 * less-than) against zero flags bytes with the high bit set into k1/k2,
 * vpaddb doubles every byte (shift left by one), and the masked vmovdqu8
 * merges in the 0x1d-XORed variant (zmm12/zmm13 scratch) only for the
 * flagged bytes.  Requires MUL2_SETUP() to have loaded zmm22/zmm23.
 */
#define	_MUL2(r...) \
{ \
	switch (REG_CNT(r)) { \
	case 2: \
		__asm( \
		    "vpcmpb $1, %zmm23, %" VR0(r)", %k1\n" \
		    "vpcmpb $1, %zmm23, %" VR1(r)", %k2\n" \
		    "vpaddb %" VR0(r)", %" VR0(r)", %" VR0(r) "\n" \
		    "vpaddb %" VR1(r)", %" VR1(r)", %" VR1(r) "\n" \
		    "vpxord %zmm22, %" VR0(r)", %zmm12\n" \
		    "vpxord %zmm22, %" VR1(r)", %zmm13\n" \
		    "vmovdqu8 %zmm12, %" VR0(r) "{%k1}\n" \
		    "vmovdqu8 %zmm13, %" VR1(r) "{%k2}"); \
		break; \
	default: \
		ASM_BUG(); \
	} \
}

/* Multiply 2 or 4 registers by 2, a pair at a time. */
#define	MUL2(r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		_MUL2(R_01(r)); \
		_MUL2(R_23(r)); \
		break; \
	case 2: \
		_MUL2(r); \
		break; \
	default: \
		ASM_BUG(); \
	} \
}

/* Multiply by 4 == two successive multiplications by 2. */
#define	MUL4(r...) \
{ \
	MUL2(r); \
	MUL2(r); \
}
224 | ||
225 | #define _0f "zmm15" | |
226 | #define _as "zmm14" | |
227 | #define _bs "zmm13" | |
228 | #define _ltmod "zmm12" | |
229 | #define _ltmul "zmm11" | |
230 | #define _ta "zmm10" | |
231 | #define _tb "zmm15" | |
232 | ||
01017962 | 233 | static const uint8_t __attribute__((aligned(64))) _mul_mask = 0x0F; |
7f547f85 RD |
234 | |
/*
 * Multiply each byte of a register pair by the constant `c` using the
 * four 16-byte shuffle tables gf_clmul_mod_lt[4*c .. 4*c+3]: the high and
 * low nibbles of every byte are looked up separately (vpshufb) and the
 * partial products are XOR-combined.
 */
#define	_MULx2(c, r...) \
{ \
	switch (REG_CNT(r)) { \
	case 2: \
		__asm( \
		    "vpbroadcastb (%[mask]), %%" _0f "\n" \
		    /* upper bits */ \
		    "vbroadcasti32x4 0x00(%[lt]), %%" _ltmod "\n" \
		    "vbroadcasti32x4 0x10(%[lt]), %%" _ltmul "\n" \
		    \
		    "vpsraw $0x4, %%" VR0(r) ", %%"_as "\n" \
		    "vpsraw $0x4, %%" VR1(r) ", %%"_bs "\n" \
		    "vpandq %%" _0f ", %%" VR0(r) ", %%" VR0(r) "\n" \
		    "vpandq %%" _0f ", %%" VR1(r) ", %%" VR1(r) "\n" \
		    "vpandq %%" _0f ", %%" _as ", %%" _as "\n" \
		    "vpandq %%" _0f ", %%" _bs ", %%" _bs "\n" \
		    \
		    "vpshufb %%" _as ", %%" _ltmod ", %%" _ta "\n" \
		    "vpshufb %%" _bs ", %%" _ltmod ", %%" _tb "\n" \
		    "vpshufb %%" _as ", %%" _ltmul ", %%" _as "\n" \
		    "vpshufb %%" _bs ", %%" _ltmul ", %%" _bs "\n" \
		    /* lower bits */ \
		    "vbroadcasti32x4 0x20(%[lt]), %%" _ltmod "\n" \
		    "vbroadcasti32x4 0x30(%[lt]), %%" _ltmul "\n" \
		    \
		    "vpxorq %%" _ta ", %%" _as ", %%" _as "\n" \
		    "vpxorq %%" _tb ", %%" _bs ", %%" _bs "\n" \
		    \
		    "vpshufb %%" VR0(r) ", %%" _ltmod ", %%" _ta "\n" \
		    "vpshufb %%" VR1(r) ", %%" _ltmod ", %%" _tb "\n" \
		    "vpshufb %%" VR0(r) ", %%" _ltmul ", %%" VR0(r) "\n"\
		    "vpshufb %%" VR1(r) ", %%" _ltmul ", %%" VR1(r) "\n"\
		    \
		    "vpxorq %%" _ta ", %%" VR0(r) ", %%" VR0(r) "\n" \
		    "vpxorq %%" _as ", %%" VR0(r) ", %%" VR0(r) "\n" \
		    "vpxorq %%" _tb ", %%" VR1(r) ", %%" VR1(r) "\n" \
		    "vpxorq %%" _bs ", %%" VR1(r) ", %%" VR1(r) "\n" \
		    : : [mask] "r" (&_mul_mask), \
		    [lt] "r" (gf_clmul_mod_lt[4*(c)])); \
		break; \
	default: \
		ASM_BUG(); \
	} \
}

/* Multiply 2 or 4 registers by the constant `c`, a pair at a time. */
#define	MUL(c, r...) \
{ \
	switch (REG_CNT(r)) { \
	case 4: \
		_MULx2(c, R_01(r)); \
		_MULx2(c, R_23(r)); \
		break; \
	case 2: \
		_MULx2(c, R_01(r)); \
		break; \
	default: \
		ASM_BUG(); \
	} \
}
294 | ||
/* Bracket all kernels with kernel-FPU save/restore. */
#define	raidz_math_begin()	kfpu_begin()
#define	raidz_math_end()	kfpu_end()

/*
 * ZERO, COPY, and MUL operations are already 2x unrolled, which means that
 * the stride of these operations for avx512 must not exceed 4. Otherwise, a
 * single step would exceed 512B block size.
 */

#define	SYN_STRIDE		4
/*
 * Per-method configuration consumed by vdev_raidz_math_impl.h: the stride
 * (registers processed per step) and the zmm register numbers assigned to
 * each operand group (D = data, C = constants/parity accumulators,
 * X/Y/Z/T = reconstruction temporaries).
 */
#define	ZERO_STRIDE		4
#define	ZERO_DEFINE()		{}
#define	ZERO_D			0, 1, 2, 3

#define	COPY_STRIDE		4
#define	COPY_DEFINE()		{}
#define	COPY_D			0, 1, 2, 3

#define	ADD_STRIDE		4
#define	ADD_DEFINE()		{}
#define	ADD_D			0, 1, 2, 3

#define	MUL_STRIDE		4
#define	MUL_DEFINE()		{}
#define	MUL_D			0, 1, 2, 3

#define	GEN_P_STRIDE		4
#define	GEN_P_DEFINE()		{}
#define	GEN_P_P			0, 1, 2, 3

#define	GEN_PQ_STRIDE		4
#define	GEN_PQ_DEFINE()		{}
#define	GEN_PQ_D		0, 1, 2, 3
#define	GEN_PQ_C		4, 5, 6, 7

#define	GEN_PQR_STRIDE		4
#define	GEN_PQR_DEFINE()	{}
#define	GEN_PQR_D		0, 1, 2, 3
#define	GEN_PQR_C		4, 5, 6, 7

#define	SYN_Q_DEFINE()		{}
#define	SYN_Q_D			0, 1, 2, 3
#define	SYN_Q_X			4, 5, 6, 7

#define	SYN_R_DEFINE()		{}
#define	SYN_R_D			0, 1, 2, 3
#define	SYN_R_X			4, 5, 6, 7

#define	SYN_PQ_DEFINE()		{}
#define	SYN_PQ_D		0, 1, 2, 3
#define	SYN_PQ_X		4, 5, 6, 7

#define	REC_PQ_STRIDE		2
#define	REC_PQ_DEFINE()		{}
#define	REC_PQ_X		0, 1
#define	REC_PQ_Y		2, 3
#define	REC_PQ_T		4, 5

#define	SYN_PR_DEFINE()		{}
#define	SYN_PR_D		0, 1, 2, 3
#define	SYN_PR_X		4, 5, 6, 7

#define	REC_PR_STRIDE		2
#define	REC_PR_DEFINE()		{}
#define	REC_PR_X		0, 1
#define	REC_PR_Y		2, 3
#define	REC_PR_T		4, 5

#define	SYN_QR_DEFINE()		{}
#define	SYN_QR_D		0, 1, 2, 3
#define	SYN_QR_X		4, 5, 6, 7

#define	REC_QR_STRIDE		2
#define	REC_QR_DEFINE()		{}
#define	REC_QR_X		0, 1
#define	REC_QR_Y		2, 3
#define	REC_QR_T		4, 5

#define	SYN_PQR_DEFINE()	{}
#define	SYN_PQR_D		0, 1, 2, 3
#define	SYN_PQR_X		4, 5, 6, 7

#define	REC_PQR_STRIDE		2
#define	REC_PQR_DEFINE()	{}
#define	REC_PQR_X		0, 1
#define	REC_PQR_Y		2, 3
#define	REC_PQR_Z		4, 5
#define	REC_PQR_XS		6, 7
#define	REC_PQR_YS		8, 9
385 | ||
386 | ||
387 | #include <sys/vdev_raidz_impl.h> | |
388 | #include "vdev_raidz_math_impl.h" | |
389 | ||
390 | DEFINE_GEN_METHODS(avx512bw); | |
391 | DEFINE_REC_METHODS(avx512bw); | |
392 | ||
393 | static boolean_t | |
394 | raidz_will_avx512bw_work(void) | |
395 | { | |
396 | return (zfs_avx_available() && | |
02730c33 BB |
397 | zfs_avx512f_available() && |
398 | zfs_avx512bw_available()); | |
7f547f85 RD |
399 | } |
400 | ||
401 | const raidz_impl_ops_t vdev_raidz_avx512bw_impl = { | |
402 | .init = NULL, | |
403 | .fini = NULL, | |
404 | .gen = RAIDZ_GEN_METHODS(avx512bw), | |
405 | .rec = RAIDZ_REC_METHODS(avx512bw), | |
406 | .is_supported = &raidz_will_avx512bw_work, | |
407 | .name = "avx512bw" | |
408 | }; | |
409 | ||
410 | #endif /* defined(__x86_64) && defined(HAVE_AVX512BW) */ |