;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
;   * Redistributions of source code must retain the above copyright
;     notice, this list of conditions and the following disclaimer.
;   * Redistributions in binary form must reproduce the above copyright
;     notice, this list of conditions and the following disclaimer in
;     the documentation and/or other materials provided with the
;     distribution.
;   * Neither the name of Intel Corporation nor the names of its
;     contributors may be used to endorse or promote products derived
;     from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_6vect_mad_avx2(len, vec, vec_i, mul_array, src, dest);
;;;

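;;; Multiply-and-add over GF(2^8): for the single source vector selected
;;; by vec_i, accumulate its product with six coefficients into six
;;; destination buffers, i.e. dest[j][i] ^= gf_mul(gf[j], src[i]) for
;;; j = 0..5 and i = 0..len-1. mul_array holds one 32-byte lookup table
;;; per src/dest pair (16 bytes for the low nibble, 16 for the high),
;;; in the layout produced by the ISA-L table-init helpers such as
;;; ec_init_tables().
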
%include "reg_sizes.asm"

%define PS 8

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0     rcx
 %define arg0.w   ecx
 %define arg1     rdx
 %define arg2     r8
 %define arg3     r9
 %define arg4     r12
 %define arg5     r15
 %define tmp      r11
 %define tmp.w    r11d
 %define tmp.b    r11b
 %define tmp2     r10
 %define tmp3     r13
 %define return   rax
 %define return.w eax
 %define stack_size 16*10 + 3*8
 %define arg(x)   [rsp + stack_size + PS + PS*x]
 %define func(x)  proc_frame x

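;;; The Win64 ABI treats xmm6-xmm15 and r12/r13/r15 as callee-saved, so
;;; FUNC_SAVE spills them to a reserved stack area; the 5th and 6th
;;; arguments arrive on the stack and are fetched through arg(x).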
%macro FUNC_SAVE 0
        sub     rsp, stack_size
        movdqa  [rsp+16*0], xmm6
        movdqa  [rsp+16*1], xmm7
        movdqa  [rsp+16*2], xmm8
        movdqa  [rsp+16*3], xmm9
        movdqa  [rsp+16*4], xmm10
        movdqa  [rsp+16*5], xmm11
        movdqa  [rsp+16*6], xmm12
        movdqa  [rsp+16*7], xmm13
        movdqa  [rsp+16*8], xmm14
        movdqa  [rsp+16*9], xmm15
        save_reg r12, 10*16 + 0*8
        save_reg r13, 10*16 + 1*8
        save_reg r15, 10*16 + 2*8
        end_prolog
        mov     arg4, arg(4)
        mov     arg5, arg(5)
%endmacro

%macro FUNC_RESTORE 0
        movdqa  xmm6, [rsp+16*0]
        movdqa  xmm7, [rsp+16*1]
        movdqa  xmm8, [rsp+16*2]
        movdqa  xmm9, [rsp+16*3]
        movdqa  xmm10, [rsp+16*4]
        movdqa  xmm11, [rsp+16*5]
        movdqa  xmm12, [rsp+16*6]
        movdqa  xmm13, [rsp+16*7]
        movdqa  xmm14, [rsp+16*8]
        movdqa  xmm15, [rsp+16*9]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r13, [rsp + 10*16 + 1*8]
        mov     r15, [rsp + 10*16 + 2*8]        ;r15 is saved at offset 2*8 above
        add     rsp, stack_size
%endmacro

%elifidn __OUTPUT_FORMAT__, elf64
 %define arg0     rdi
 %define arg0.w   edi
 %define arg1     rsi
 %define arg2     rdx
 %define arg3     rcx
 %define arg4     r8
 %define arg5     r9
 %define tmp      r11
 %define tmp.w    r11d
 %define tmp.b    r11b
 %define tmp2     r10
 %define tmp3     r12
 %define return   rax
 %define return.w eax

 %define func(x)  x:
 %macro FUNC_SAVE 0
        push    r12
 %endmacro
 %macro FUNC_RESTORE 0
        pop     r12
 %endmacro
%endif
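;;; Note: the SysV ELF64 ABI passes all six arguments in registers and
;;; makes every xmm/ymm register caller-saved, so the branch above only
;;; needs to preserve r12 (used as tmp3).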

;;; gf_6vect_mad_avx2(len, vec, vec_i, mul_array, src, dest)
%define len       arg0
%define len.w     arg0.w
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest1     arg5
%define pos       return
%define pos.w     return.w

%define dest2     tmp3
%define dest3     tmp2
%define dest4     mul_array
%define dest5     vec
%define dest6     vec_i
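;;; vec, vec_i and mul_array are dead once the table pointers have been
;;; formed, so their registers are recycled as destination pointers
;;; dest4..dest6 to relieve register pressure.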

%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Use non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
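;;; Default is unaligned vmovdqu. With EC_ALIGNED_ADDR, buffers are
;;; assumed 32-byte aligned and non-temporal (cache-bypassing) moves are
;;; used, unless NO_NT_LDST selects plain aligned vmovdqa instead.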


default rel

[bits 64]
section .text

%define xmask0f   ymm15
%define xmask0fx  xmm15
%define xgft1_lo  ymm14
%define xgft2_lo  ymm13
%define xgft3_lo  ymm12
%define xgft4_lo  ymm11
%define xgft5_lo  ymm10
%define xgft6_lo  ymm9

%define x0        ymm0
%define xtmpa     ymm1
%define xtmpl     ymm2
%define xtmplx    xmm2
%define xtmph     ymm3
%define xtmphx    xmm3
%define xd1       ymm4
%define xd2       ymm5
%define xd3       ymm6
%define xd4       ymm7
%define xd5       ymm8
%define xd6       xd1
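;;; xd6 deliberately aliases xd1: dest1's result is stored early in the
;;; main loop, freeing the register to hold the dest6 data afterwards.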

align 16
global gf_6vect_mad_avx2:function
func(gf_6vect_mad_avx2)
        FUNC_SAVE
        sub     len, 32
        jl      .return_fail
        xor     pos, pos
        mov     tmp.b, 0x0f
        vpinsrb xmask0fx, xmask0fx, tmp.w, 0
        vpbroadcastb xmask0f, xmask0fx  ;Construct mask 0x0f0f0f...

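;;; Note: AVX2 has no byte broadcast from a general-purpose register
;;; (that arrived with AVX-512), hence the vpinsrb + vpbroadcastb pair
;;; above to materialize the 0x0f nibble mask.
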
        sal     vec_i, 5                ;Multiply by 32
        sal     vec, 5                  ;Multiply by 32
        lea     tmp, [mul_array + vec_i]
        mov     vec_i, vec
        mov     mul_array, vec
        sal     vec_i, 1
        sal     mul_array, 1
        add     vec_i, vec              ;vec_i = vec*96
        add     mul_array, vec_i        ;mul_array = vec*160
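;;; The six 32-byte tables for this source are laid out 32*vec bytes
;;; apart. After the shifts above: vec = 32*vec_orig, vec_i = 96*vec_orig
;;; (offset of table 4) and mul_array = 160*vec_orig (offset of table 6),
;;; while tmp points at table 1 for the selected source vector.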

        vmovdqu xgft1_lo, [tmp]         ;Load array Ax{00}, Ax{01}, ..., Ax{0f}
                                        ;     "     Ax{00}, Ax{10}, ..., Ax{f0}
        vmovdqu xgft2_lo, [tmp+vec]     ;Load array Bx{00}, Bx{01}, ..., Bx{0f}
                                        ;     "     Bx{00}, Bx{10}, ..., Bx{f0}
        vmovdqu xgft3_lo, [tmp+2*vec]   ;Load array Cx{00}, Cx{01}, ..., Cx{0f}
                                        ;     "     Cx{00}, Cx{10}, ..., Cx{f0}
        vmovdqu xgft4_lo, [tmp+vec_i]   ;Load array Dx{00}, Dx{01}, ..., Dx{0f}
                                        ;     "     Dx{00}, Dx{10}, ..., Dx{f0}
        vmovdqu xgft5_lo, [tmp+4*vec]   ;Load array Ex{00}, Ex{01}, ..., Ex{0f}
                                        ;     "     Ex{00}, Ex{10}, ..., Ex{f0}
        vmovdqu xgft6_lo, [tmp+mul_array] ;Load array Fx{00}, Fx{01}, ..., Fx{0f}
                                        ;     "     Fx{00}, Fx{10}, ..., Fx{f0}

        mov     dest2, [dest1+PS]       ; reuse tmp3
        mov     dest3, [dest1+2*PS]     ; reuse tmp2
        mov     dest4, [dest1+3*PS]     ; reuse mul_array
        mov     dest5, [dest1+4*PS]     ; reuse vec
        mov     dest6, [dest1+5*PS]     ; reuse vec_i
        mov     dest1, [dest1]

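;;; dest1 (arg5) arrives as a pointer to the array of six destination
;;; pointers, so it is dereferenced last, once the other five have been
;;; copied into their recycled registers.
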
.loop32:
        XLDR    x0, [src+pos]           ;Get next source vector
        XLDR    xd1, [dest1+pos]        ;Get next dest vector
        XLDR    xd2, [dest2+pos]        ;Get next dest vector
        XLDR    xd3, [dest3+pos]        ;Get next dest vector
        XLDR    xd4, [dest4+pos]        ;Get next dest vector
        XLDR    xd5, [dest5+pos]        ;Get next dest vector

        vpand   xtmpl, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0
        vperm2i128 xtmpa, xtmpl, x0, 0x30 ;swap xtmpa from 1lo|2lo to 1lo|2hi
        vperm2i128 x0, xtmpl, x0, 0x12  ;swap x0 from 1hi|2hi to 1hi|2lo

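;;; vpshufb only shuffles within 128-bit lanes, and each xgft*_lo holds
;;; the low-nibble table in its low lane and the high-nibble table in
;;; its high lane. The two vperm2i128 above therefore regroup the data:
;;; xtmpa = (low nibbles of bytes 0-15 | high nibbles of bytes 16-31)
;;; and x0 the complement, so each lane lines up with the right table.
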
        ;dest1
        vperm2i128 xtmph, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft1_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xtmpl     ;GF add high and low partials
        vpxor   xd1, xd1, xtmph         ;xd1 += partial

        XSTR    [dest1+pos], xd1        ;Store result into dest1

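;;; Illustrative per-byte model of the block above (C-like pseudocode in
;;; comments only; tbl_lo/tbl_hi are informal names for the two 16-byte
;;; halves of xgft1_lo, not ISA-L identifiers):
;;;     lo = tbl_lo[src[i] & 0x0f];    /* vpshufb, low-nibble lookup   */
;;;     hi = tbl_hi[src[i] >> 4];      /* vpshufb, high-nibble lookup  */
;;;     dest1[i] ^= lo ^ hi;           /* GF(2^8) mul result folded in */
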
        ;dest2
        vperm2i128 xtmph, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft2_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xtmpl     ;GF add high and low partials
        vpxor   xd2, xd2, xtmph         ;xd2 += partial

        ;dest3
        vperm2i128 xtmph, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft3_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xtmpl     ;GF add high and low partials
        vpxor   xd3, xd3, xtmph         ;xd3 += partial

        XLDR    xd6, [dest6+pos]        ;reuse xd1. Get next dest vector

        ;dest4
        vperm2i128 xtmph, xgft4_lo, xgft4_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft4_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xtmpl     ;GF add high and low partials
        vpxor   xd4, xd4, xtmph         ;xd4 += partial

        ;dest5
        vperm2i128 xtmph, xgft5_lo, xgft5_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft5_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xtmpl     ;GF add high and low partials
        vpxor   xd5, xd5, xtmph         ;xd5 += partial

        ;dest6
        vperm2i128 xtmph, xgft6_lo, xgft6_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft6_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xtmpl     ;GF add high and low partials
        vpxor   xd6, xd6, xtmph         ;xd6 += partial

        XSTR    [dest2+pos], xd2        ;Store result into dest2
        XSTR    [dest3+pos], xd3        ;Store result into dest3
        XSTR    [dest4+pos], xd4        ;Store result into dest4
        XSTR    [dest5+pos], xd5        ;Store result into dest5
        XSTR    [dest6+pos], xd6        ;Store result into dest6

        add     pos, 32                 ;Loop on 32 bytes at a time
        cmp     pos, len
        jle     .loop32

        lea     tmp, [len + 32]
        cmp     pos, tmp
        je      .return_pass

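;;; A len that is not a multiple of 32 leaves a tail of 1..31 bytes.
;;; Rather than a scalar loop, one more full 32-byte pass is run over
;;; the last 32 bytes of each buffer; a byte mask (built below from
;;; constip32) restricts the XOR update to the not-yet-processed tail.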
.lessthan32:
        ;; Tail len
        ;; Do one more overlap pass
        mov     tmp.b, 0x1f
        vpinsrb xtmphx, xtmphx, tmp.w, 0
        vpbroadcastb xtmph, xtmphx      ;Construct mask 0x1f1f1f...

        mov     tmp, len                ;Overlapped offset length-32

        XLDR    x0, [src+tmp]           ;Get next source vector
        XLDR    xd1, [dest1+tmp]        ;Get next dest vector
        XLDR    xd2, [dest2+tmp]        ;Get next dest vector
        XLDR    xd3, [dest3+tmp]        ;Get next dest vector
        XLDR    xd4, [dest4+tmp]        ;Get next dest vector
        XLDR    xd5, [dest5+tmp]        ;Get next dest vector

        sub     len, pos

        vpinsrb xtmplx, xtmplx, len.w, 15
        vinserti128 xtmpl, xtmpl, xtmplx, 1 ;swapped to xtmplx | xtmplx
        vpshufb xtmpl, xtmpl, xtmph     ;Broadcast len to all bytes. xtmph=0x1f1f1f...
        vpcmpgtb xtmpl, xtmpl, [constip32]
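;;; constip32 (see .data) holds the signed bytes -1..-32. len now holds
;;; tail_len - 32 (a value in -31..-1); broadcasting it and comparing
;;; with vpcmpgtb sets 0xff in exactly the last tail_len byte positions
;;; of the 32-byte window, so the vpand steps below discard partials
;;; for bytes the main loop already produced.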

        vpand   xtmph, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0
        vperm2i128 xtmpa, xtmph, x0, 0x30 ;swap xtmpa from 1lo|2lo to 1lo|2hi
        vperm2i128 x0, xtmph, x0, 0x12  ;swap x0 from 1hi|2hi to 1hi|2lo

        ;dest1
        vperm2i128 xtmph, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xgft1_lo, xgft1_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xgft1_lo  ;GF add high and low partials
        vpand   xtmph, xtmph, xtmpl     ;Apply tail mask
        vpxor   xd1, xd1, xtmph         ;xd1 += partial

        XSTR    [dest1+tmp], xd1        ;Store result into dest1

        ;dest2
        vperm2i128 xtmph, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xgft2_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xgft2_lo  ;GF add high and low partials
        vpand   xtmph, xtmph, xtmpl     ;Apply tail mask
        vpxor   xd2, xd2, xtmph         ;xd2 += partial

        ;dest3
        vperm2i128 xtmph, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xgft3_lo, xgft3_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xgft3_lo  ;GF add high and low partials
        vpand   xtmph, xtmph, xtmpl     ;Apply tail mask
        vpxor   xd3, xd3, xtmph         ;xd3 += partial

        XLDR    xd6, [dest6+tmp]        ;reuse xd1. Get next dest vector

        ;dest4
        vperm2i128 xtmph, xgft4_lo, xgft4_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xgft4_lo, xgft4_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xgft4_lo  ;GF add high and low partials
        vpand   xtmph, xtmph, xtmpl     ;Apply tail mask
        vpxor   xd4, xd4, xtmph         ;xd4 += partial

        ;dest5
        vperm2i128 xtmph, xgft5_lo, xgft5_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xgft5_lo, xgft5_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xgft5_lo  ;GF add high and low partials
        vpand   xtmph, xtmph, xtmpl     ;Apply tail mask
        vpxor   xd5, xd5, xtmph         ;xd5 += partial

        ;dest6
        vperm2i128 xtmph, xgft6_lo, xgft6_lo, 0x01 ; swapped to hi | lo
        vpshufb xtmph, xtmph, x0        ;Lookup mul table of high nibble
        vpshufb xgft6_lo, xgft6_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph, xtmph, xgft6_lo  ;GF add high and low partials
        vpand   xtmph, xtmph, xtmpl     ;Apply tail mask
        vpxor   xd6, xd6, xtmph         ;xd6 += partial

        XSTR    [dest2+tmp], xd2        ;Store result into dest2
        XSTR    [dest3+tmp], xd3        ;Store result into dest3
        XSTR    [dest4+tmp], xd4        ;Store result into dest4
        XSTR    [dest5+tmp], xd5        ;Store result into dest5
        XSTR    [dest6+tmp], xd6        ;Store result into dest6

.return_pass:
        FUNC_RESTORE
        mov     return, 0
        ret

.return_fail:
        FUNC_RESTORE
        mov     return, 1
        ret

endproc_frame

section .data
align 32
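;;; Signed byte indices -1..-32 in descending order, compared against
;;; the broadcast (tail_len - 32) by vpcmpgtb to build the tail mask.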
constip32:
        ddq     0xf0f1f2f3f4f5f6f7f8f9fafbfcfdfeff
        ddq     0xe0e1e2e3e4e5e6e7e8e9eaebecedeeef

;;; func                 core, ver, snum
slversion gf_6vect_mad_avx2, 04, 01, 0211