;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_5vect_mad_avx2(len, vec, vec_i, mul_array, src, dest);
;;;

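;;; Parameter notes (descriptive only; the authoritative C prototype is the
;;; one declared in ISA-L's erasure_code.h, not restated here):
;;;   len       - number of bytes to process in each buffer
;;;   vec       - number of source vectors in the encode
;;;   vec_i     - index of this source vector within the encode
;;;   mul_array - expanded GF(2^8) lookup tables, 32 bytes per
;;;               source/destination pair; the table for output j is read
;;;               from mul_array + 32*(j*vec + vec_i), as the loads below show
;;;   src       - pointer to the source buffer
;;;   dest      - array of 5 pointers to the destination (parity) buffers
;;;
;;; The routine effectively performs a multiply-and-add for each output j:
;;;   dest[j][i] ^= gf_mul(coef_j, src[i])   for j = 0..4, i = 0..len-1
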
%include "reg_sizes.asm"

%define PS 8

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg0.w ecx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9
 %define arg4   r12
 %define arg5   r15
 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define tmp2   r10
 %define return rax
 %define return.w eax
 %define stack_size 16*10 + 3*8
 %define arg(x) [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x

%macro FUNC_SAVE 0
	sub	rsp, stack_size
	movdqa	[rsp+16*0], xmm6
	movdqa	[rsp+16*1], xmm7
	movdqa	[rsp+16*2], xmm8
	movdqa	[rsp+16*3], xmm9
	movdqa	[rsp+16*4], xmm10
	movdqa	[rsp+16*5], xmm11
	movdqa	[rsp+16*6], xmm12
	movdqa	[rsp+16*7], xmm13
	movdqa	[rsp+16*8], xmm14
	movdqa	[rsp+16*9], xmm15
	save_reg	r12, 10*16 + 0*8
	save_reg	r15, 10*16 + 1*8
	end_prolog
	mov	arg4, arg(4)
	mov	arg5, arg(5)
%endmacro

%macro FUNC_RESTORE 0
	movdqa	xmm6, [rsp+16*0]
	movdqa	xmm7, [rsp+16*1]
	movdqa	xmm8, [rsp+16*2]
	movdqa	xmm9, [rsp+16*3]
	movdqa	xmm10, [rsp+16*4]
	movdqa	xmm11, [rsp+16*5]
	movdqa	xmm12, [rsp+16*6]
	movdqa	xmm13, [rsp+16*7]
	movdqa	xmm14, [rsp+16*8]
	movdqa	xmm15, [rsp+16*9]
	mov	r12, [rsp + 10*16 + 0*8]
	mov	r15, [rsp + 10*16 + 1*8]
	add	rsp, stack_size
%endmacro

%elifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg0.w edi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9
 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define tmp2   r10
 %define return rax
 %define return.w eax

 %define func(x) x: endbranch
 %define FUNC_SAVE
 %define FUNC_RESTORE
%endif

;;; gf_5vect_mad_avx2(len, vec, vec_i, mul_array, src, dest)
%define len   arg0
%define len.w arg0.w
%define vec   arg1
%define vec_i arg2
%define mul_array arg3
%define src   arg4
%define dest1 arg5
%define pos   return
%define pos.w return.w

%define dest2 tmp2
%define dest3 mul_array
%define dest4 vec
%define dest5 vec_i

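;;; Note: dest (arg5) arrives as the address of an array of 5 destination
;;; pointers; it is dereferenced before the main loop. Once the gf tables have
;;; been loaded, mul_array, vec and vec_i are no longer needed, so their
;;; registers are recycled as dest3..dest5.
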
%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Use non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif

default rel

[bits 64]
section .text

%define xmask0f   ymm15
%define xmask0fx  xmm15
%define xgft1_lo  ymm14
%define xgft2_lo  ymm13
%define xgft3_lo  ymm12
%define xgft4_lo  ymm11
%define xgft5_lo  ymm10

%define x0      ymm0
%define xtmpa   ymm1
%define xtmpl   ymm2
%define xtmplx  xmm2
%define xtmph1  ymm3
%define xtmph1x xmm3
%define xtmph2  ymm4
%define xd1     ymm5
%define xd2     ymm6
%define xd3     ymm7
%define xd4     ymm8
%define xd5     ymm9

align 16
mk_global gf_5vect_mad_avx2, function
func(gf_5vect_mad_avx2)
	FUNC_SAVE
	sub	len, 32
	jl	.return_fail
	xor	pos, pos
	mov	tmp.b, 0x0f
	vpinsrb	xmask0fx, xmask0fx, tmp.w, 0
	vpbroadcastb xmask0f, xmask0fx	;Construct mask 0x0f0f0f...
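	;; GF(2^8) multiply via nibble lookups: each coefficient's 32-byte table
	;; holds the products of the low nibble (bytes 0-15) and of the high
	;; nibble (bytes 16-31), so for every source byte b the product is
	;; lo_tbl[b & 0x0f] XOR hi_tbl[b >> 4]. The 0x0f mask built above and
	;; the vpsraw/vpand pair in the loop extract those two nibbles.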

	sal	vec_i, 5		;Multiply by 32
	sal	vec, 5			;Multiply by 32
	lea	tmp, [mul_array + vec_i]

	vmovdqu	xgft1_lo, [tmp]		;Load array Ax{00}, Ax{01}, ..., Ax{0f}
					;     "     Ax{00}, Ax{10}, ..., Ax{f0}
	vmovdqu	xgft2_lo, [tmp+vec]	;Load array Bx{00}, Bx{01}, ..., Bx{0f}
					;     "     Bx{00}, Bx{10}, ..., Bx{f0}
	vmovdqu	xgft3_lo, [tmp+2*vec]	;Load array Cx{00}, Cx{01}, ..., Cx{0f}
					;     "     Cx{00}, Cx{10}, ..., Cx{f0}
	vmovdqu	xgft5_lo, [tmp+4*vec]	;Load array Ex{00}, Ex{01}, ..., Ex{0f}
					;     "     Ex{00}, Ex{10}, ..., Ex{f0}
	add	tmp, vec
	vmovdqu	xgft4_lo, [tmp+2*vec]	;Load array Dx{00}, Dx{01}, ..., Dx{0f}
					;     "     Dx{00}, Dx{10}, ..., Dx{f0}

	mov	dest3, [dest1+2*PS]	; reuse mul_array
	mov	dest4, [dest1+3*PS]	; reuse vec
	mov	dest5, [dest1+4*PS]	; reuse vec_i
	mov	dest2, [dest1+PS]
	mov	dest1, [dest1]

.loop32:
	XLDR	x0, [src+pos]		;Get next source vector

	XLDR	xd1, [dest1+pos]	;Get next dest vector
	XLDR	xd2, [dest2+pos]	;Get next dest vector
	XLDR	xd3, [dest3+pos]	;Get next dest vector
	XLDR	xd4, [dest4+pos]	;Get next dest vector
	XLDR	xd5, [dest5+pos]	;Get next dest vector

	vpand	xtmpl, x0, xmask0f	;Mask low src nibble in bits 4-0
	vpsraw	x0, x0, 4		;Shift to put high nibble into bits 4-0
	vpand	x0, x0, xmask0f		;Mask high src nibble in bits 4-0
	vperm2i128 xtmpa, xtmpl, x0, 0x30 ;swap xtmpa from 1lo|2lo to 1lo|2hi
	vperm2i128 x0, xtmpl, x0, 0x12	;swap x0 from 1hi|2hi to 1hi|2lo
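	;; Cross-lane arrangement: xtmpa now holds {low nibbles of bytes 0-15 |
	;; high nibbles of bytes 16-31} and x0 holds {high nibbles of bytes 0-15 |
	;; low nibbles of bytes 16-31}. Each gft register keeps {lo table | hi
	;; table} and its vperm2i128-swapped copy keeps {hi table | lo table},
	;; so one vpshufb against each copy plus a vpxor yields the full 32-byte
	;; GF product without vpshufb ever crossing a 128-bit lane.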

	vperm2i128 xtmph1, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
	vperm2i128 xtmph2, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo

	; dest1
	vpshufb	xtmph1, xtmph1, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl, xgft1_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph1, xtmph1, xtmpl	;GF add high and low partials
	vpxor	xd1, xd1, xtmph1	;xd1 += partial

	vperm2i128 xtmph1, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo
	; dest2
	vpshufb	xtmph2, xtmph2, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl, xgft2_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph2, xtmph2, xtmpl	;GF add high and low partials
	vpxor	xd2, xd2, xtmph2	;xd2 += partial

	vperm2i128 xtmph2, xgft4_lo, xgft4_lo, 0x01 ; swapped to hi | lo
	; dest3
	vpshufb	xtmph1, xtmph1, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl, xgft3_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph1, xtmph1, xtmpl	;GF add high and low partials
	vpxor	xd3, xd3, xtmph1	;xd3 += partial

	vperm2i128 xtmph1, xgft5_lo, xgft5_lo, 0x01 ; swapped to hi | lo
	; dest4
	vpshufb	xtmph2, xtmph2, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl, xgft4_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph2, xtmph2, xtmpl	;GF add high and low partials
	vpxor	xd4, xd4, xtmph2	;xd4 += partial

	; dest5
	vpshufb	xtmph1, xtmph1, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl, xgft5_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph1, xtmph1, xtmpl	;GF add high and low partials
	vpxor	xd5, xd5, xtmph1	;xd5 += partial

	XSTR	[dest1+pos], xd1
	XSTR	[dest2+pos], xd2
	XSTR	[dest3+pos], xd3
	XSTR	[dest4+pos], xd4
	XSTR	[dest5+pos], xd5

	add	pos, 32			;Loop on 32 bytes at a time
	cmp	pos, len
	jle	.loop32

	lea	tmp, [len + 32]
	cmp	pos, tmp
	je	.return_pass

.lessthan32:
	;; Tail len
	;; Do one more overlap pass
	mov	tmp.b, 0x1f
	vpinsrb	xtmph1x, xtmph1x, tmp.w, 0
	vpbroadcastb xtmph1, xtmph1x	;Construct mask 0x1f1f1f...

	mov	tmp, len		;Overlapped offset length-32

	XLDR	x0, [src+tmp]		;Get next source vector

	XLDR	xd1, [dest1+tmp]	;Get next dest vector
	XLDR	xd2, [dest2+tmp]	;Get next dest vector
	XLDR	xd3, [dest3+tmp]	;Get next dest vector
	XLDR	xd4, [dest4+tmp]	;Get next dest vector
	XLDR	xd5, [dest5+tmp]	;Get next dest vector

	sub	len, pos

	vmovdqa	xtmph2, [constip32]	;Load const of i + 32
	vpinsrb	xtmplx, xtmplx, len.w, 15
	vinserti128 xtmpl, xtmpl, xtmplx, 1 ;swapped to xtmplx | xtmplx
	vpshufb	xtmpl, xtmpl, xtmph1	;Broadcast len to all bytes. xtmph1=0x1f1f1f...
	vpcmpgtb xtmpl, xtmpl, xtmph2
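	;; xtmpl is now a per-byte write mask for the overlapped tail: constip32
	;; holds the signed bytes -1,-2,...,-32 and the (negative) len-pos value
	;; was broadcast to every byte, so the compare sets byte i to 0xff
	;; exactly when offset (length-32)+i is at or beyond pos, i.e. was not
	;; already handled by the main loop. Each partial below is ANDed with
	;; this mask so previously written bytes are not XORed twice.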

	vpand	xtmph1, x0, xmask0f	;Mask low src nibble in bits 4-0
	vpsraw	x0, x0, 4		;Shift to put high nibble into bits 4-0
	vpand	x0, x0, xmask0f		;Mask high src nibble in bits 4-0
	vperm2i128 xtmpa, xtmph1, x0, 0x30 ;swap xtmpa from 1lo|2lo to 1lo|2hi
	vperm2i128 x0, xtmph1, x0, 0x12	;swap x0 from 1hi|2hi to 1hi|2lo

	vperm2i128 xtmph1, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
	vperm2i128 xtmph2, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo

	; dest1
	vpshufb	xtmph1, xtmph1, x0	;Lookup mul table of high nibble
	vpshufb	xgft1_lo, xgft1_lo, xtmpa ;Lookup mul table of low nibble
	vpxor	xtmph1, xtmph1, xgft1_lo ;GF add high and low partials
	vpand	xtmph1, xtmph1, xtmpl
	vpxor	xd1, xd1, xtmph1	;xd1 += partial

	vperm2i128 xtmph1, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo
	; dest2
	vpshufb	xtmph2, xtmph2, x0	;Lookup mul table of high nibble
	vpshufb	xgft2_lo, xgft2_lo, xtmpa ;Lookup mul table of low nibble
	vpxor	xtmph2, xtmph2, xgft2_lo ;GF add high and low partials
	vpand	xtmph2, xtmph2, xtmpl
	vpxor	xd2, xd2, xtmph2	;xd2 += partial

	vperm2i128 xtmph2, xgft4_lo, xgft4_lo, 0x01 ; swapped to hi | lo
	; dest3
	vpshufb	xtmph1, xtmph1, x0	;Lookup mul table of high nibble
	vpshufb	xgft3_lo, xgft3_lo, xtmpa ;Lookup mul table of low nibble
	vpxor	xtmph1, xtmph1, xgft3_lo ;GF add high and low partials
	vpand	xtmph1, xtmph1, xtmpl
	vpxor	xd3, xd3, xtmph1	;xd3 += partial

	vperm2i128 xtmph1, xgft5_lo, xgft5_lo, 0x01 ; swapped to hi | lo
	; dest4
	vpshufb	xtmph2, xtmph2, x0	;Lookup mul table of high nibble
	vpshufb	xgft4_lo, xgft4_lo, xtmpa ;Lookup mul table of low nibble
	vpxor	xtmph2, xtmph2, xgft4_lo ;GF add high and low partials
	vpand	xtmph2, xtmph2, xtmpl
	vpxor	xd4, xd4, xtmph2	;xd4 += partial

	; dest5
	vpshufb	xtmph1, xtmph1, x0	;Lookup mul table of high nibble
	vpshufb	xgft5_lo, xgft5_lo, xtmpa ;Lookup mul table of low nibble
	vpxor	xtmph1, xtmph1, xgft5_lo ;GF add high and low partials
	vpand	xtmph1, xtmph1, xtmpl
	vpxor	xd5, xd5, xtmph1	;xd5 += partial

	XSTR	[dest1+tmp], xd1
	XSTR	[dest2+tmp], xd2
	XSTR	[dest3+tmp], xd3
	XSTR	[dest4+tmp], xd4
	XSTR	[dest5+tmp], xd5

.return_pass:
	FUNC_RESTORE
	mov	return, 0
	ret

.return_fail:
	FUNC_RESTORE
	mov	return, 1
	ret

endproc_frame

section .data
align 32
constip32:
	dq 0xf8f9fafbfcfdfeff, 0xf0f1f2f3f4f5f6f7
	dq 0xe8e9eaebecedeeef, 0xe0e1e2e3e4e5e6e7
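	;; Byte i of constip32 (little-endian) is -(i+1): 0xff, 0xfe, ..., 0xe0.
	;; It is compared against the broadcast len-pos value to build the
	;; tail write mask above.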

;;; func        core, ver, snum
slversion gf_5vect_mad_avx2, 04, 01, 020e