;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_4vect_mad_avx2(len, vec, vec_i, mul_array, src, dest);
;;;
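;;;
;;; Commentary (added, inferred from the code below rather than an
;;; authoritative spec): len is the buffer length in bytes, vec the number
;;; of source vectors, vec_i the index of this source vector, mul_array the
;;; table of 32-byte GF(2^8) multiply constants (a 16-byte low-nibble and a
;;; 16-byte high-nibble lookup table per source/parity pair), src the input
;;; buffer, and dest an array of 4 destination (parity) pointers. For each
;;; byte i and each parity p the routine performs a multiply-and-add:
;;;     dest[p][i] ^= gf_mul(gf_const[p], src[i])
;;;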

%include "reg_sizes.asm"

%define PS 8

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0     rcx
 %define arg0.w   ecx
 %define arg1     rdx
 %define arg2     r8
 %define arg3     r9
 %define arg4     r12
 %define arg5     r15
 %define tmp      r11
 %define tmp.w    r11d
 %define tmp.b    r11b
 %define return   rax
 %define return.w eax
 %define stack_size 16*10 + 3*8
 %define arg(x)   [rsp + stack_size + PS + PS*x]
 %define func(x)  proc_frame x

%macro FUNC_SAVE 0
        sub     rsp, stack_size
        movdqa  [rsp+16*0], xmm6
        movdqa  [rsp+16*1], xmm7
        movdqa  [rsp+16*2], xmm8
        movdqa  [rsp+16*3], xmm9
        movdqa  [rsp+16*4], xmm10
        movdqa  [rsp+16*5], xmm11
        movdqa  [rsp+16*6], xmm12
        movdqa  [rsp+16*7], xmm13
        movdqa  [rsp+16*8], xmm14
        movdqa  [rsp+16*9], xmm15
        save_reg        r12, 10*16 + 0*8
        save_reg        r15, 10*16 + 1*8
        end_prolog
        mov     arg4, arg(4)
        mov     arg5, arg(5)
%endmacro
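;; Commentary (added): on win64 xmm6-xmm15, r12 and r15 are callee-saved, so
;; FUNC_SAVE/FUNC_RESTORE spill and reload them in a local stack frame. The
;; win64 ABI passes only the first 4 arguments in registers, so arg4/arg5
;; (src and dest) are fetched from the caller's stack via arg(4)/arg(5).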

%macro FUNC_RESTORE 0
        movdqa  xmm6, [rsp+16*0]
        movdqa  xmm7, [rsp+16*1]
        movdqa  xmm8, [rsp+16*2]
        movdqa  xmm9, [rsp+16*3]
        movdqa  xmm10, [rsp+16*4]
        movdqa  xmm11, [rsp+16*5]
        movdqa  xmm12, [rsp+16*6]
        movdqa  xmm13, [rsp+16*7]
        movdqa  xmm14, [rsp+16*8]
        movdqa  xmm15, [rsp+16*9]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r15, [rsp + 10*16 + 1*8]
        add     rsp, stack_size
%endmacro

%elifidn __OUTPUT_FORMAT__, elf64
 %define arg0     rdi
 %define arg0.w   edi
 %define arg1     rsi
 %define arg2     rdx
 %define arg3     rcx
 %define arg4     r8
 %define arg5     r9
 %define tmp      r11
 %define tmp.w    r11d
 %define tmp.b    r11b
 %define return   rax
 %define return.w eax

 %define func(x)  x:
 %define FUNC_SAVE
 %define FUNC_RESTORE
%endif
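;; Commentary (added): under the ELF64 SysV ABI all six arguments already
;; arrive in registers and none of the registers used here are callee-saved,
;; so FUNC_SAVE/FUNC_RESTORE expand to nothing and func(x) is a plain label.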


;;; gf_4vect_mad_avx2(len, vec, vec_i, mul_array, src, dest)
%define len       arg0
%define len.w     arg0.w
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest1     arg5
%define pos       return
%define pos.w     return.w

%define dest2     mul_array
%define dest3     vec
%define dest4     vec_i

%ifndef EC_ALIGNED_ADDR
 ;;; Use un-aligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
 ;;; Use non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
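;; Commentary (added): EC_ALIGNED_ADDR and NO_NT_LDST are build-time defines.
;; By default unaligned vmovdqu loads/stores are used; EC_ALIGNED_ADDR switches
;; to aligned accesses, either non-temporal (vmovntdqa/vmovntdq) or, with
;; NO_NT_LDST also set, plain aligned vmovdqa.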


default rel

[bits 64]
section .text

%define xmask0f   ymm15
%define xmask0fx  xmm15
%define xgft1_lo  ymm14
%define xgft2_lo  ymm13
%define xgft3_lo  ymm12
%define xgft4_lo  ymm11

%define x0      ymm0
%define xtmpa   ymm1
%define xtmpl   ymm2
%define xtmplx  xmm2
%define xtmph1  ymm3
%define xtmph1x xmm3
%define xtmph2  ymm4
%define xtmph3  ymm5
%define xtmph4  ymm6
%define xd1     ymm7
%define xd2     ymm8
%define xd3     ymm9
%define xd4     ymm10

align 16
global gf_4vect_mad_avx2:function
func(gf_4vect_mad_avx2)
        FUNC_SAVE
        sub     len, 32
        jl      .return_fail
        xor     pos, pos
        mov     tmp.b, 0x0f
        vpinsrb xmask0fx, xmask0fx, tmp.w, 0
        vpbroadcastb xmask0f, xmask0fx  ;Construct mask 0x0f0f0f...

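        ;; Commentary (added): GF(2^8) multiplication by a constant is done
        ;; with two 16-entry PSHUFB lookups, one table indexed by the low
        ;; nibble of each source byte and one by the high nibble, with the two
        ;; partial products XORed together, i.e.
        ;;     gf_mul(c, b) = tbl_lo[b & 0x0f] ^ tbl_hi[b >> 4]
        ;; The 0x0f0f... mask built above isolates those nibbles.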
        sal     vec_i, 5                ;Multiply by 32
        sal     vec, 5                  ;Multiply by 32
        lea     tmp, [mul_array + vec_i]

        vmovdqu xgft1_lo, [tmp]         ;Load array Ax{00}, Ax{01}, Ax{02}, ...
                                        ; " Ax{00}, Ax{10}, Ax{20}, ... , Ax{f0}
        vmovdqu xgft2_lo, [tmp+vec]     ;Load array Bx{00}, Bx{01}, Bx{02}, ...
                                        ; " Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
        vmovdqu xgft3_lo, [tmp+2*vec]   ;Load array Cx{00}, Cx{01}, Cx{02}, ...
                                        ; " Cx{00}, Cx{10}, Cx{20}, ... , Cx{f0}
        add     tmp, vec
        vmovdqu xgft4_lo, [tmp+2*vec]   ;Load array Dx{00}, Dx{01}, Dx{02}, ...
                                        ; " Dx{00}, Dx{10}, Dx{20}, ... , Dx{f0}
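        ;; Commentary (added, inferred from the scaling above): mul_array holds
        ;; one 32-byte entry per (source, parity) pair - a 16-byte low-nibble
        ;; table in the lower lane followed by a 16-byte high-nibble table in
        ;; the upper lane - so vec_i*32 selects this source's entry and vec*32
        ;; is the stride from one parity row (A, B, C, D) to the next.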

        mov     dest2, [dest1+PS]       ; reuse mul_array
        mov     dest3, [dest1+2*PS]     ; reuse vec
        mov     dest4, [dest1+3*PS]     ; reuse vec_i
        mov     dest1, [dest1]

.loop32:
        XLDR    x0, [src+pos]           ;Get next source vector

        XLDR    xd1, [dest1+pos]        ;Get next dest vector
        XLDR    xd2, [dest2+pos]        ;Get next dest vector
        XLDR    xd3, [dest3+pos]        ;Get next dest vector
        XLDR    xd4, [dest4+pos]        ;Get next dest vector

        vpand   xtmpl, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0

        vperm2i128 xtmpa, xtmpl, x0, 0x30       ;swap xtmpa from 1lo|2lo to 1lo|2hi
        vperm2i128 x0, xtmpl, x0, 0x12          ;swap x0 from 1hi|2hi to 1hi|2lo

        vperm2i128 xtmph1, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
        vperm2i128 xtmph2, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo
        vperm2i128 xtmph3, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo
        vperm2i128 xtmph4, xgft4_lo, xgft4_lo, 0x01 ; swapped to hi | lo

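        ;; Commentary (added): each 32-byte table register holds the low-nibble
        ;; table in its lower 128-bit lane and the high-nibble table in its
        ;; upper lane. The lane shuffles above pair high nibbles with the
        ;; high-nibble table and low nibbles with the low-nibble table in both
        ;; lanes, so a single vpshufb per table yields one of the two partial
        ;; products for all 32 bytes.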

        ; dest1
        vpshufb xtmph1, xtmph1, x0      ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft1_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph1, xtmph1, xtmpl   ;GF add high and low partials
        vpxor   xd1, xd1, xtmph1        ;xd1 += partial

        ; dest2
        vpshufb xtmph2, xtmph2, x0      ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft2_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph2, xtmph2, xtmpl   ;GF add high and low partials
        vpxor   xd2, xd2, xtmph2        ;xd2 += partial

        ; dest3
        vpshufb xtmph3, xtmph3, x0      ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft3_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph3, xtmph3, xtmpl   ;GF add high and low partials
        vpxor   xd3, xd3, xtmph3        ;xd3 += partial

        ; dest4
        vpshufb xtmph4, xtmph4, x0      ;Lookup mul table of high nibble
        vpshufb xtmpl, xgft4_lo, xtmpa  ;Lookup mul table of low nibble
        vpxor   xtmph4, xtmph4, xtmpl   ;GF add high and low partials
        vpxor   xd4, xd4, xtmph4        ;xd4 += partial

        XSTR    [dest1+pos], xd1
        XSTR    [dest2+pos], xd2
        XSTR    [dest3+pos], xd3
        XSTR    [dest4+pos], xd4

        add     pos, 32                 ;Loop on 32 bytes at a time
        cmp     pos, len
        jle     .loop32

        lea     tmp, [len + 32]
        cmp     pos, tmp
        je      .return_pass

.lessthan32:
        ;; Tail len
        ;; Do one more overlap pass
        mov     tmp.b, 0x1f
        vpinsrb xtmph1x, xtmph1x, tmp.w, 0
        vpbroadcastb xtmph1, xtmph1x    ;Construct mask 0x1f1f1f...

        mov     tmp, len                ;Overlapped offset length-32

        XLDR    x0, [src+tmp]           ;Get next source vector

        XLDR    xd1, [dest1+tmp]        ;Get next dest vector
        XLDR    xd2, [dest2+tmp]        ;Get next dest vector
        XLDR    xd3, [dest3+tmp]        ;Get next dest vector
        XLDR    xd4, [dest4+tmp]        ;Get next dest vector

        sub     len, pos

        vmovdqa xtmph2, [constip32]     ;Load const of i + 32
        vpinsrb xtmplx, xtmplx, len.w, 15
        vinserti128 xtmpl, xtmpl, xtmplx, 1 ;swapped to xtmplx | xtmplx
        vpshufb xtmpl, xtmpl, xtmph1    ;Broadcast len to all bytes. xtmph1=0x1f1f1f...
        vpcmpgtb xtmpl, xtmpl, xtmph2
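        ;; Commentary (added): the tail is handled with one final, overlapping
        ;; 32-byte pass over the window ending exactly at the buffer end
        ;; (starting at original_len - 32, held in tmp). len - pos, a negative
        ;; byte value, is broadcast and compared against constip32 (bytes
        ;; -1, -2, ..., -32), so xtmpl holds 0xff only for bytes not already
        ;; processed by the main loop; the partial products below are ANDed
        ;; with this mask before being XORed into the destinations, leaving
        ;; previously processed bytes untouched.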

        vpand   xtmph1, x0, xmask0f     ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0

        vperm2i128 xtmpa, xtmph1, x0, 0x30      ;swap xtmpa from 1lo|2lo to 1lo|2hi
        vperm2i128 x0, xtmph1, x0, 0x12         ;swap x0 from 1hi|2hi to 1hi|2lo

        vperm2i128 xtmph1, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
        vperm2i128 xtmph2, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo
        vperm2i128 xtmph3, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo
        vperm2i128 xtmph4, xgft4_lo, xgft4_lo, 0x01 ; swapped to hi | lo

        ; dest1
        vpshufb xtmph1, xtmph1, x0        ;Lookup mul table of high nibble
        vpshufb xgft1_lo, xgft1_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph1, xtmph1, xgft1_lo  ;GF add high and low partials
        vpand   xtmph1, xtmph1, xtmpl
        vpxor   xd1, xd1, xtmph1          ;xd1 += partial

        ; dest2
        vpshufb xtmph2, xtmph2, x0        ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xgft2_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph2, xtmph2, xgft2_lo  ;GF add high and low partials
        vpand   xtmph2, xtmph2, xtmpl
        vpxor   xd2, xd2, xtmph2          ;xd2 += partial

        ; dest3
        vpshufb xtmph3, xtmph3, x0        ;Lookup mul table of high nibble
        vpshufb xgft3_lo, xgft3_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph3, xtmph3, xgft3_lo  ;GF add high and low partials
        vpand   xtmph3, xtmph3, xtmpl
        vpxor   xd3, xd3, xtmph3          ;xd3 += partial

        ; dest4
        vpshufb xtmph4, xtmph4, x0        ;Lookup mul table of high nibble
        vpshufb xgft4_lo, xgft4_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph4, xtmph4, xgft4_lo  ;GF add high and low partials
        vpand   xtmph4, xtmph4, xtmpl
        vpxor   xd4, xd4, xtmph4          ;xd4 += partial

        XSTR    [dest1+tmp], xd1
        XSTR    [dest2+tmp], xd2
        XSTR    [dest3+tmp], xd3
        XSTR    [dest4+tmp], xd4

.return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret

.return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame

section .data
align 32
constip32:
        ddq 0xf0f1f2f3f4f5f6f7f8f9fafbfcfdfeff
        ddq 0xe0e1e2e3e4e5e6e7e8e9eaebecedeeef
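        ;; Commentary (added): viewed as signed bytes, constip32 is the
        ;; sequence -1, -2, ..., -32 (byte i = -(i+1)); it is compared against
        ;; the broadcast (len - pos) value to build the tail byte mask above.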

;;;       func                 core, ver, snum
slversion gf_4vect_mad_avx2,   04,   01,  020b