lib/raid6/recov_avx2.c
lib/raid6: Add AVX2 optimized recovery functions
/*
 * Copyright (C) 2012 Intel Corporation
 * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)

#if CONFIG_AS_AVX2

#include <linux/raid/pq.h>
#include "x86.h"

static int raid6_has_avx2(void)
{
	return boot_cpu_has(X86_FEATURE_AVX2) &&
		boot_cpu_has(X86_FEATURE_AVX);
}

static void raid6_2data_recov_avx2(int disks, size_t bytes, int faila,
				   int failb, void **ptrs)
{
	u8 *p, *q, *dp, *dq;
	const u8 *pbmul;	/* P multiplier table for B data */
	const u8 *qmul;		/* Q multiplier table (for both) */
	const u8 x0f = 0x0f;

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data pages
	   Use the dead data pages as temporary storage for
	   delta p and delta q */
	dp = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-2] = dp;
	dq = (u8 *)ptrs[failb];
	ptrs[failb] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila] = dp;
	ptrs[failb] = dq;
	ptrs[disks-2] = p;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
		raid6_gfexp[failb]]];
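	/*
	 * These tables implement the closed-form recovery used by the
	 * scalar version in lib/raid6/recov.c.  With A = faila, B = failb,
	 * and Pxy/Qxy the syndromes just computed with the failed disks
	 * zeroed:
	 *
	 *	DB = pbmul[P ^ Pxy] ^ qmul[Q ^ Qxy]
	 *	DA = DB ^ (P ^ Pxy)
	 *
	 * where pbmul multiplies by 1/(g^(B-A) + 1) and qmul by
	 * 1/(g^A + g^B) in GF(2^8).  The loop below is the 32-byte-wide
	 * SIMD form of that per-byte computation.
	 */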

	kernel_fpu_begin();

	/* ymm7 = 0x0f repeated across all 32 bytes, for nibble masking */
	asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));

	while (bytes) {
#ifdef CONFIG_X86_64
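		/*
		 * With sixteen ymm registers available, the x86-64 build
		 * processes two 32-byte lanes (64 bytes) per iteration;
		 * the 32-bit build below has only ymm0-ymm7 and handles
		 * one 32-byte lane at a time.  Either way the loop assumes
		 * bytes is a multiple of the chunk size, as it is for the
		 * page-sized regions the in-kernel callers pass.
		 */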
		asm volatile("vmovdqa %0, %%ymm1" : : "m" (q[0]));
		asm volatile("vmovdqa %0, %%ymm9" : : "m" (q[32]));
		asm volatile("vmovdqa %0, %%ymm0" : : "m" (p[0]));
		asm volatile("vmovdqa %0, %%ymm8" : : "m" (p[32]));
		asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (dq[0]));
		asm volatile("vpxor %0, %%ymm9, %%ymm9" : : "m" (dq[32]));
		asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (dp[0]));
		asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (dp[32]));

		/*
		 * 1 = dq[0]  ^ q[0]
		 * 9 = dq[32] ^ q[32]
		 * 0 = dp[0]  ^ p[0]
		 * 8 = dp[32] ^ p[32]
		 */

		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16]));
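		/*
		 * Each raid6_vgfmul entry is a pair of 16-byte lookup
		 * tables: qmul[0..15] maps a low nibble to its GF(2^8)
		 * product with the constant, qmul[16..31] does the same
		 * for the high nibble.  vbroadcasti128 replicates each
		 * table into both 128-bit lanes so vpshufb performs 32
		 * byte lookups at once; XORing the two halves completes
		 * the multiplication.
		 */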

		asm volatile("vpsraw $4, %ymm1, %ymm3");
		asm volatile("vpsraw $4, %ymm9, %ymm12");
		asm volatile("vpand %ymm7, %ymm1, %ymm1");
		asm volatile("vpand %ymm7, %ymm9, %ymm9");
		asm volatile("vpand %ymm7, %ymm3, %ymm3");
		asm volatile("vpand %ymm7, %ymm12, %ymm12");
		asm volatile("vpshufb %ymm9, %ymm4, %ymm14");
		asm volatile("vpshufb %ymm1, %ymm4, %ymm4");
		asm volatile("vpshufb %ymm12, %ymm5, %ymm15");
		asm volatile("vpshufb %ymm3, %ymm5, %ymm5");
		asm volatile("vpxor %ymm14, %ymm15, %ymm15");
		asm volatile("vpxor %ymm4, %ymm5, %ymm5");

		/*
		 * 5  = qx[0]
		 * 15 = qx[32]
		 */

		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16]));
		asm volatile("vpsraw $4, %ymm0, %ymm2");
		asm volatile("vpsraw $4, %ymm8, %ymm6");
		asm volatile("vpand %ymm7, %ymm0, %ymm3");
		asm volatile("vpand %ymm7, %ymm8, %ymm14");
		asm volatile("vpand %ymm7, %ymm2, %ymm2");
		asm volatile("vpand %ymm7, %ymm6, %ymm6");
		asm volatile("vpshufb %ymm14, %ymm4, %ymm12");
		asm volatile("vpshufb %ymm3, %ymm4, %ymm4");
		asm volatile("vpshufb %ymm6, %ymm1, %ymm13");
		asm volatile("vpshufb %ymm2, %ymm1, %ymm1");
		asm volatile("vpxor %ymm4, %ymm1, %ymm1");
		asm volatile("vpxor %ymm12, %ymm13, %ymm13");

		/*
		 * 1  = pbmul[px[0]]
		 * 13 = pbmul[px[32]]
		 */
		asm volatile("vpxor %ymm5, %ymm1, %ymm1");
		asm volatile("vpxor %ymm15, %ymm13, %ymm13");

		/*
		 * 1  = db = DQ
		 * 13 = db[32] = DQ[32]
		 */
		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
		asm volatile("vmovdqa %%ymm13, %0" : "=m" (dq[32]));
		asm volatile("vpxor %ymm1, %ymm0, %ymm0");
		asm volatile("vpxor %ymm13, %ymm8, %ymm8");

		asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0]));
		asm volatile("vmovdqa %%ymm8, %0" : "=m" (dp[32]));

		bytes -= 64;
		p += 64;
		q += 64;
		dp += 64;
		dq += 64;
#else
		asm volatile("vmovdqa %0, %%ymm1" : : "m" (*q));
		asm volatile("vmovdqa %0, %%ymm0" : : "m" (*p));
		asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (*dq));
		asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (*dp));

		/* 1 = dq ^ q; 0 = dp ^ p */

		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16]));

		/*
		 * 1 = dq ^ q
		 * 3 = (dq ^ q) >> 4
		 */
		asm volatile("vpsraw $4, %ymm1, %ymm3");
		asm volatile("vpand %ymm7, %ymm1, %ymm1");
		asm volatile("vpand %ymm7, %ymm3, %ymm3");
		asm volatile("vpshufb %ymm1, %ymm4, %ymm4");
		asm volatile("vpshufb %ymm3, %ymm5, %ymm5");
		asm volatile("vpxor %ymm4, %ymm5, %ymm5");

		/* 5 = qx */

		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16]));

		asm volatile("vpsraw $4, %ymm0, %ymm2");
		asm volatile("vpand %ymm7, %ymm0, %ymm3");
		asm volatile("vpand %ymm7, %ymm2, %ymm2");
		asm volatile("vpshufb %ymm3, %ymm4, %ymm4");
		asm volatile("vpshufb %ymm2, %ymm1, %ymm1");
		asm volatile("vpxor %ymm4, %ymm1, %ymm1");

		/* 1 = pbmul[px] */
		asm volatile("vpxor %ymm5, %ymm1, %ymm1");
		/* 1 = db = DQ */
		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));

		asm volatile("vpxor %ymm1, %ymm0, %ymm0");
		asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0]));

		bytes -= 32;
		p += 32;
		q += 32;
		dp += 32;
		dq += 32;
#endif
	}

	kernel_fpu_end();
}

static void raid6_datap_recov_avx2(int disks, size_t bytes, int faila,
				   void **ptrs)
{
	u8 *p, *q, *dq;
	const u8 *qmul;		/* Q multiplier table */
	const u8 x0f = 0x0f;

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data page
	   Use the dead data page as temporary storage for delta q */
	dq = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila] = dq;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
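	/*
	 * Here one data disk and P failed.  gen_syndrome() above rebuilt
	 * a provisional P (computed with the missing disk zeroed) directly
	 * into the p page and left Qa in dq.  Since Q ^ Qa = g^A * Da,
	 * the lost data is Da = qmul[Q ^ Qa], with qmul multiplying by
	 * g^(-A), and the true parity follows as P ^= Da; the loop below
	 * mirrors the scalar version in lib/raid6/recov.c.
	 */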

	kernel_fpu_begin();

	asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));

	while (bytes) {
#ifdef CONFIG_X86_64
		asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0]));
		asm volatile("vmovdqa %0, %%ymm8" : : "m" (dq[32]));
		asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));
		asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (q[32]));

		/*
		 * 3 = q[0]  ^ dq[0]
		 * 8 = q[32] ^ dq[32]
		 */
		asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0]));
		asm volatile("vmovapd %ymm0, %ymm13");
		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16]));
		asm volatile("vmovapd %ymm1, %ymm14");

		asm volatile("vpsraw $4, %ymm3, %ymm6");
		asm volatile("vpsraw $4, %ymm8, %ymm12");
		asm volatile("vpand %ymm7, %ymm3, %ymm3");
		asm volatile("vpand %ymm7, %ymm8, %ymm8");
		asm volatile("vpand %ymm7, %ymm6, %ymm6");
		asm volatile("vpand %ymm7, %ymm12, %ymm12");
		asm volatile("vpshufb %ymm3, %ymm0, %ymm0");
		asm volatile("vpshufb %ymm8, %ymm13, %ymm13");
		asm volatile("vpshufb %ymm6, %ymm1, %ymm1");
		asm volatile("vpshufb %ymm12, %ymm14, %ymm14");
		asm volatile("vpxor %ymm0, %ymm1, %ymm1");
		asm volatile("vpxor %ymm13, %ymm14, %ymm14");

		/*
		 * 1  = qmul[q[0]  ^ dq[0]]
		 * 14 = qmul[q[32] ^ dq[32]]
		 */
		asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
		asm volatile("vmovdqa %0, %%ymm12" : : "m" (p[32]));
		asm volatile("vpxor %ymm1, %ymm2, %ymm2");
		asm volatile("vpxor %ymm14, %ymm12, %ymm12");

		/*
		 * 2  = p[0]  ^ qmul[q[0]  ^ dq[0]]
		 * 12 = p[32] ^ qmul[q[32] ^ dq[32]]
		 */

		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
		asm volatile("vmovdqa %%ymm14, %0" : "=m" (dq[32]));
		asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));
		asm volatile("vmovdqa %%ymm12, %0" : "=m" (p[32]));

		bytes -= 64;
		p += 64;
		q += 64;
		dq += 64;
#else
		asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0]));
		asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));

		/* 3 = q ^ dq */

		asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16]));

		asm volatile("vpsraw $4, %ymm3, %ymm6");
		asm volatile("vpand %ymm7, %ymm3, %ymm3");
		asm volatile("vpand %ymm7, %ymm6, %ymm6");
		asm volatile("vpshufb %ymm3, %ymm0, %ymm0");
		asm volatile("vpshufb %ymm6, %ymm1, %ymm1");
		asm volatile("vpxor %ymm0, %ymm1, %ymm1");

		/* 1 = qmul[q ^ dq] */

		asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
		asm volatile("vpxor %ymm1, %ymm2, %ymm2");

		/* 2 = p ^ qmul[q ^ dq] */

		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
		asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));

		bytes -= 32;
		p += 32;
		q += 32;
		dq += 32;
#endif
	}

	kernel_fpu_end();
}

const struct raid6_recov_calls raid6_recov_avx2 = {
	.data2 = raid6_2data_recov_avx2,
	.datap = raid6_datap_recov_avx2,
	.valid = raid6_has_avx2,
#ifdef CONFIG_X86_64
	.name = "avx2x2",
#else
	.name = "avx2x1",
#endif
	.priority = 2,
};
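
/*
 * This descriptor is listed in the raid6_recov_algos[] table in
 * lib/raid6/algos.c, where the highest-priority implementation whose
 * ->valid() check passes is selected at boot; priority 2 ranks AVX2
 * above the SSSE3 recovery routines (priority 1).
 */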

#else
#warning "your version of binutils lacks AVX2 support"
#endif

#endif