;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_6vect_mad_sse(len, vec, vec_i, mul_array, src, dest);
;;;
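;;; Parameter sketch (comment added for reference; it follows the usual isa-l
;;; erasure_code API convention and is an assumption, not restated in this file):
;;;   len       - length of each vector in bytes
;;;   vec       - number of source vectors (k) encoded into each dest
;;;   vec_i     - index of the source vector being multiplied and accumulated
;;;   mul_array - pointer to the pre-expanded GF(2^8) multiply tables
;;;               (32 bytes per source/dest pair: 16 low-nibble + 16 high-nibble entries)
;;;   src       - pointer to the source data buffer
;;;   dest      - array of 6 pointers to the destination (parity) buffers
;;; Approximate C prototype under those assumptions:
;;;   void gf_6vect_mad_sse(int len, int vec, int vec_i,
;;;                         unsigned char *mul_array,
;;;                         unsigned char *src, unsigned char **dest);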

%include "reg_sizes.asm"

%define PS 8

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0      rcx
 %define arg0.w    ecx
 %define arg1      rdx
 %define arg2      r8
 %define arg3      r9
 %define arg4      r12
 %define arg5      r15
 %define tmp       r11
 %define tmp.w     r11d
 %define tmp2      r10
 %define tmp3      r13
 %define tmp4      r14
 %define tmp5      rdi
 %define return    rax
 %define return.w  eax
 %define stack_size 16*10 + 5*8
 %define arg(x)    [rsp + stack_size + PS + PS*x]
 %define func(x)   proc_frame x

%macro FUNC_SAVE 0
        sub     rsp, stack_size
        movdqa  [rsp+16*0], xmm6
        movdqa  [rsp+16*1], xmm7
        movdqa  [rsp+16*2], xmm8
        movdqa  [rsp+16*3], xmm9
        movdqa  [rsp+16*4], xmm10
        movdqa  [rsp+16*5], xmm11
        movdqa  [rsp+16*6], xmm12
        movdqa  [rsp+16*7], xmm13
        movdqa  [rsp+16*8], xmm14
        movdqa  [rsp+16*9], xmm15
        save_reg r12, 10*16 + 0*8
        save_reg r13, 10*16 + 1*8
        save_reg r14, 10*16 + 2*8
        save_reg r15, 10*16 + 3*8
        save_reg rdi, 10*16 + 4*8
        end_prolog
        mov     arg4, arg(4)
        mov     arg5, arg(5)
%endmacro

%macro FUNC_RESTORE 0
        movdqa  xmm6, [rsp+16*0]
        movdqa  xmm7, [rsp+16*1]
        movdqa  xmm8, [rsp+16*2]
        movdqa  xmm9, [rsp+16*3]
        movdqa  xmm10, [rsp+16*4]
        movdqa  xmm11, [rsp+16*5]
        movdqa  xmm12, [rsp+16*6]
        movdqa  xmm13, [rsp+16*7]
        movdqa  xmm14, [rsp+16*8]
        movdqa  xmm15, [rsp+16*9]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r13, [rsp + 10*16 + 1*8]
        mov     r14, [rsp + 10*16 + 2*8]
        mov     r15, [rsp + 10*16 + 3*8]
        mov     rdi, [rsp + 10*16 + 4*8]
        add     rsp, stack_size
%endmacro

%elifidn __OUTPUT_FORMAT__, elf64
 %define arg0      rdi
 %define arg0.w    edi
 %define arg1      rsi
 %define arg2      rdx
 %define arg3      rcx
 %define arg4      r8
 %define arg5      r9
 %define tmp       r11
 %define tmp.w     r11d
 %define tmp2      r10
 %define tmp3      r12
 %define tmp4      r13
 %define tmp5      r14
 %define return    rax
 %define return.w  eax

 %define func(x)   x:
 %macro FUNC_SAVE 0
        push    r12
        push    r13
        push    r14
 %endmacro
 %macro FUNC_RESTORE 0
        pop     r14
        pop     r13
        pop     r12
 %endmacro
%endif

;;; gf_6vect_mad_sse(len, vec, vec_i, mul_array, src, dest)
%define len       arg0
%define len.w     arg0.w
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest1     arg5
%define pos       return
%define pos.w     return.w

%define dest2     mul_array
%define dest3     tmp2
%define dest4     tmp4
%define dest5     tmp5
%define dest6     vec_i

%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
 %define XLDR movdqu
 %define XSTR movdqu
%else
;;; Use Non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR movdqa
  %define XSTR movdqa
 %else
  %define XLDR movntdqa
  %define XSTR movntdq
 %endif
%endif
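;;; Note (comment added): EC_ALIGNED_ADDR and NO_NT_LDST are build-time defines;
;;; the default is unaligned movdqu, while the non-temporal movntdqa/movntdq
;;; pair assumes 16-byte aligned buffers.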

default rel

[bits 64]
section .text

%define xmask0f  xmm15
%define xgft4_lo xmm14
%define xgft4_hi xmm13
%define xgft5_lo xmm12
%define xgft5_hi xmm11
%define xgft6_lo xmm10
%define xgft6_hi xmm9

%define x0     xmm0
%define xtmpa  xmm1
%define xtmph1 xmm2
%define xtmpl1 xmm3
%define xtmph2 xmm4
%define xtmpl2 xmm5
%define xtmph3 xmm6
%define xtmpl3 xmm7
%define xd1    xmm8
%define xd2    xtmpl1
%define xd3    xtmph1


align 16
global gf_6vect_mad_sse:function
func(gf_6vect_mad_sse)
        FUNC_SAVE
        sub     len, 16
        jl      .return_fail

        xor     pos, pos
        movdqa  xmask0f, [mask0f]       ;Load mask of lower nibble in each byte

        mov     tmp, vec
        sal     vec_i, 5                ;Multiply by 32
        lea     tmp3, [mul_array + vec_i]
        sal     tmp, 6                  ;Multiply by 64

        sal     vec, 5                  ;Multiply by 32
        lea     vec_i, [tmp + vec]      ;vec_i = vec*96
        lea     mul_array, [tmp + vec_i] ;mul_array = vec*160

        movdqu  xgft5_lo, [tmp3+2*tmp]          ;Load array Ex{00}, Ex{01}, ..., Ex{0f}
        movdqu  xgft5_hi, [tmp3+2*tmp+16]       ; "     Ex{00}, Ex{10}, ..., Ex{f0}
        movdqu  xgft4_lo, [tmp3+vec_i]          ;Load array Dx{00}, Dx{01}, Dx{02}, ...
        movdqu  xgft4_hi, [tmp3+vec_i+16]       ; "     Dx{00}, Dx{10}, Dx{20}, ... , Dx{f0}
        movdqu  xgft6_lo, [tmp3+mul_array]      ;Load array Fx{00}, Fx{01}, ..., Fx{0f}
        movdqu  xgft6_hi, [tmp3+mul_array+16]   ; "     Fx{00}, Fx{10}, ..., Fx{f0}

        mov     dest2, [dest1+PS]       ; reuse mul_array
        mov     dest3, [dest1+2*PS]
        mov     dest4, [dest1+3*PS]
        mov     dest5, [dest1+4*PS]
        mov     dest6, [dest1+5*PS]     ; reuse vec_i
        mov     dest1, [dest1]

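;;; Inner-loop technique (comment added for clarity): each source byte b is
;;; split into its low and high nibble; each nibble indexes a 16-entry pshufb
;;; table holding the products coef*{0x00..0x0f} and coef*{0x00,0x10,..,0xf0},
;;; and the two partial products are combined with xor:
;;;   coef*b = gf_lo[b & 0x0f] ^ gf_hi[b >> 4]
;;; The product is then xor-ed (GF add) into the destination byte, i.e. the
;;; multiply-and-add update dest ^= coef * src, done here for all 6 dests.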
.loop16:
        XLDR    x0, [src+pos]           ;Get next source vector

        movdqu  xtmpl1, [tmp3]          ;Load array Ax{00}, Ax{01}, Ax{02}, ...
        movdqu  xtmph1, [tmp3+16]       ; "     Ax{00}, Ax{10}, Ax{20}, ... , Ax{f0}
        movdqu  xtmpl2, [tmp3+vec]      ;Load array Bx{00}, Bx{01}, Bx{02}, ...
        movdqu  xtmph2, [tmp3+vec+16]   ; "     Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
        movdqu  xtmpl3, [tmp3+2*vec]    ;Load array Cx{00}, Cx{01}, Cx{02}, ...
        movdqu  xtmph3, [tmp3+2*vec+16] ; "     Cx{00}, Cx{10}, Cx{20}, ... , Cx{f0}
        XLDR    xd1, [dest1+pos]        ;Get next dest vector

        movdqa  xtmpa, x0               ;Keep unshifted copy of src
        psraw   x0, 4                   ;Shift to put high nibble into bits 4-0
        pand    x0, xmask0f             ;Mask high src nibble in bits 4-0
        pand    xtmpa, xmask0f          ;Mask low src nibble in bits 4-0

        ;dest1
        pshufb  xtmph1, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl1, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph1, xtmpl1          ;GF add high and low partials
        pxor    xd1, xtmph1

        XLDR    xd2, [dest2+pos]        ;reuse xtmpl1. Get next dest vector
        XLDR    xd3, [dest3+pos]        ;reuse xtmph1. Get next dest3 vector

        ;dest2
        pshufb  xtmph2, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl2, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph2, xtmpl2          ;GF add high and low partials
        pxor    xd2, xtmph2

        ;dest3
        pshufb  xtmph3, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl3, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph3, xtmpl3          ;GF add high and low partials
        pxor    xd3, xtmph3

        XSTR    [dest1+pos], xd1        ;Store result into dest1
        XSTR    [dest2+pos], xd2        ;Store result into dest2
        XSTR    [dest3+pos], xd3        ;Store result into dest3

        movdqa  xtmph1, xgft4_hi        ;Reload const array registers
        movdqa  xtmpl1, xgft4_lo        ;Reload const array registers
        movdqa  xtmph2, xgft5_hi        ;Reload const array registers
        movdqa  xtmpl2, xgft5_lo        ;Reload const array registers
        movdqa  xtmph3, xgft6_hi        ;Reload const array registers
        movdqa  xtmpl3, xgft6_lo        ;Reload const array registers

        ;dest4
        XLDR    xd1, [dest4+pos]        ;Get next dest vector
        pshufb  xtmph1, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl1, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph1, xtmpl1          ;GF add high and low partials
        pxor    xd1, xtmph1

        XLDR    xd2, [dest5+pos]        ;reuse xtmpl1. Get next dest vector
        XLDR    xd3, [dest6+pos]        ;reuse xtmph1. Get next dest vector

        ;dest5
        pshufb  xtmph2, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl2, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph2, xtmpl2          ;GF add high and low partials
        pxor    xd2, xtmph2

        ;dest6
        pshufb  xtmph3, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl3, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph3, xtmpl3          ;GF add high and low partials
        pxor    xd3, xtmph3

        XSTR    [dest4+pos], xd1        ;Store result into dest4
        XSTR    [dest5+pos], xd2        ;Store result into dest5
        XSTR    [dest6+pos], xd3        ;Store result into dest6

        add     pos, 16                 ;Loop on 16 bytes at a time
        cmp     pos, len
        jle     .loop16

        lea     tmp, [len + 16]
        cmp     pos, tmp
        je      .return_pass

.lessthan16:
        ;; Tail len
        ;; Do one more overlap pass
        ;; Overlapped offset length-16
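        ;; Note (comment added, not in the original): the tail is handled by
        ;; redoing the last 16 bytes at offset len-16 and building a byte mask
        ;; from constip16 (-1,-2,...,-16) compared against the broadcast value
        ;; (remaining-16), so that only the not-yet-processed trailing bytes
        ;; get a non-zero partial product; bytes already written by the main
        ;; loop are xor-ed with zero and stored back unchanged.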
        mov     tmp, len                ;Backup len as len=rdi

        XLDR    x0, [src+tmp]           ;Get next source vector
        XLDR    xd1, [dest4+tmp]        ;Get next dest vector
        XLDR    xd2, [dest5+tmp]        ;reuse xtmpl1. Get next dest vector
        XLDR    xd3, [dest6+tmp]        ;reuse xtmph1. Get next dest vector

        sub     len, pos

        movdqa  xtmph3, [constip16]     ;Load const of i + 16
        pinsrb  xtmpl3, len.w, 15
        pshufb  xtmpl3, xmask0f         ;Broadcast len to all bytes
        pcmpgtb xtmpl3, xtmph3

        movdqa  xtmpa, x0               ;Keep unshifted copy of src
        psraw   x0, 4                   ;Shift to put high nibble into bits 4-0
        pand    x0, xmask0f             ;Mask high src nibble in bits 4-0
        pand    xtmpa, xmask0f          ;Mask low src nibble in bits 4-0

        ;dest4
        pshufb  xgft4_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft4_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft4_hi, xgft4_lo      ;GF add high and low partials
        pand    xgft4_hi, xtmpl3
        pxor    xd1, xgft4_hi

        ;dest5
        pshufb  xgft5_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft5_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft5_hi, xgft5_lo      ;GF add high and low partials
        pand    xgft5_hi, xtmpl3
        pxor    xd2, xgft5_hi

        ;dest6
        pshufb  xgft6_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft6_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft6_hi, xgft6_lo      ;GF add high and low partials
        pand    xgft6_hi, xtmpl3
        pxor    xd3, xgft6_hi

        XSTR    [dest4+tmp], xd1        ;Store result into dest4
        XSTR    [dest5+tmp], xd2        ;Store result into dest5
        XSTR    [dest6+tmp], xd3        ;Store result into dest6

        movdqu  xgft4_lo, [tmp3]        ;Load array Ax{00}, Ax{01}, Ax{02}, ...
        movdqu  xgft4_hi, [tmp3+16]     ; "     Ax{00}, Ax{10}, Ax{20}, ... , Ax{f0}
        movdqu  xgft5_lo, [tmp3+vec]    ;Load array Bx{00}, Bx{01}, Bx{02}, ...
        movdqu  xgft5_hi, [tmp3+vec+16] ; "     Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
        movdqu  xgft6_lo, [tmp3+2*vec]  ;Load array Cx{00}, Cx{01}, Cx{02}, ...
        movdqu  xgft6_hi, [tmp3+2*vec+16] ; "   Cx{00}, Cx{10}, Cx{20}, ... , Cx{f0}
        XLDR    xd1, [dest1+tmp]        ;Get next dest vector
        XLDR    xd2, [dest2+tmp]        ;reuse xtmpl1. Get next dest vector
        XLDR    xd3, [dest3+tmp]        ;reuse xtmph1. Get next dest3 vector

        ;dest1
        pshufb  xgft4_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft4_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft4_hi, xgft4_lo      ;GF add high and low partials
        pand    xgft4_hi, xtmpl3
        pxor    xd1, xgft4_hi

        ;dest2
        pshufb  xgft5_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft5_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft5_hi, xgft5_lo      ;GF add high and low partials
        pand    xgft5_hi, xtmpl3
        pxor    xd2, xgft5_hi

        ;dest3
        pshufb  xgft6_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft6_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft6_hi, xgft6_lo      ;GF add high and low partials
        pand    xgft6_hi, xtmpl3
        pxor    xd3, xgft6_hi

        XSTR    [dest1+tmp], xd1        ;Store result into dest1
        XSTR    [dest2+tmp], xd2        ;Store result into dest2
        XSTR    [dest3+tmp], xd3        ;Store result into dest3

.return_pass:
        FUNC_RESTORE
        mov     return, 0
        ret

.return_fail:
        FUNC_RESTORE
        mov     return, 1
        ret

endproc_frame

section .data

align 16

mask0f:   ddq 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f
constip16:
          ddq 0xf0f1f2f3f4f5f6f7f8f9fafbfcfdfeff

;;; func        core, ver, snum
slversion gf_6vect_mad_sse, 00, 01, 020f