;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_3vect_dot_prod_sse(len, vec, *g_tbls, **buffs, **dests);
;;;
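;;; Per the ISA-L API, this computes three GF(2^8) vector dot products at
;;; once over the same set of source buffers:
;;;   len    - length of each source buffer in bytes (must be >= 16)
;;;   vec    - number of source buffers
;;;   g_tbls - 3*vec concatenated 32-byte GF(2^8) multiply lookup tables
;;;   buffs  - array of 'vec' pointers to the source buffers
;;;   dests  - array of 3 pointers to the output buffers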

%include "reg_sizes.asm"

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9

 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r13		; must be saved and restored
 %define tmp4   r12		; must be saved and restored
 %define return rax
 %macro SLDR 2
 %endmacro
 %define SSTR SLDR
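 ;; SLDR/SSTR (stack load/store) are no-ops in 64-bit builds: every
 ;; variable stays in a register, so nothing is spilled to the stack.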
 %define PS 8
 %define LOG_PS 3

 %define func(x) x:
 %macro FUNC_SAVE 0
        push    r12
        push    r13
 %endmacro
 %macro FUNC_RESTORE 0
        pop     r13
        pop     r12
 %endmacro
%endif

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9

 %define arg4   r12		; must be saved, loaded and restored
 %define arg5   r15		; must be saved and restored
 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r13		; must be saved and restored
 %define tmp4   r14		; must be saved and restored
 %define return rax
 %macro SLDR 2
 %endmacro
 %define SSTR SLDR
 %define PS 8
 %define LOG_PS 3
 %define stack_size 6*16 + 5*8	; must be an odd multiple of 8
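 ;; Rationale: the caller's 'call' leaves rsp 8 bytes off 16-byte alignment,
 ;; so an odd multiple of 8 realigns it and keeps the xmm save slots below
 ;; 16-byte aligned for movdqa.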
 %define arg(x)  [rsp + stack_size + PS + PS*x]

 %define func(x) proc_frame x
 %macro FUNC_SAVE 0
        alloc_stack     stack_size
        save_xmm128     xmm6, 0*16
        save_xmm128     xmm7, 1*16
        save_xmm128     xmm8, 2*16
        save_xmm128     xmm9, 3*16
        save_xmm128     xmm10, 4*16
        save_xmm128     xmm11, 5*16
        save_reg        r12, 6*16 + 0*8
        save_reg        r13, 6*16 + 1*8
        save_reg        r14, 6*16 + 2*8
        save_reg        r15, 6*16 + 3*8
        end_prolog
        mov     arg4, arg(4)
 %endmacro

 %macro FUNC_RESTORE 0
        movdqa  xmm6, [rsp + 0*16]
        movdqa  xmm7, [rsp + 1*16]
        movdqa  xmm8, [rsp + 2*16]
        movdqa  xmm9, [rsp + 3*16]
        movdqa  xmm10, [rsp + 4*16]
        movdqa  xmm11, [rsp + 5*16]
        mov     r12, [rsp + 6*16 + 0*8]
        mov     r13, [rsp + 6*16 + 1*8]
        mov     r14, [rsp + 6*16 + 2*8]
        mov     r15, [rsp + 6*16 + 3*8]
        add     rsp, stack_size
 %endmacro
%endif

%ifidn __OUTPUT_FORMAT__, elf32

;;;================== High Address;
;;;     arg4
;;;     arg3
;;;     arg2
;;;     arg1
;;;     arg0
;;;     return
;;;<================= esp of caller
;;;     ebp
;;;<================= ebp = esp
;;;     var0
;;;     var1
;;;     esi
;;;     edi
;;;     ebx
;;;<================= esp of callee
;;;
;;;================== Low Address;

 %define PS 4
 %define LOG_PS 2
 %define func(x) x:
 %define arg(x)  [ebp + PS*2 + PS*x]
 %define var(x)  [ebp - PS - PS*x]

 %define trans  ecx
 %define trans2 esi
 %define arg0   trans		;trans and trans2 are scratch registers for stack-resident variables
 %define arg0_m arg(0)
 %define arg1   ebx
 %define arg2   arg2_m
 %define arg2_m arg(2)
 %define arg3   trans
 %define arg3_m arg(3)
 %define arg4   trans
 %define arg4_m arg(4)
 %define arg5   trans2
 %define tmp    edx
 %define tmp2   edi
 %define tmp3   trans2
 %define tmp3_m var(0)
 %define tmp4   trans2
 %define tmp4_m var(1)
 %define return eax
 %macro SLDR 2			;; stack load/restore
        mov     %1, %2
 %endmacro
 %define SSTR SLDR

 %macro FUNC_SAVE 0
        push    ebp
        mov     ebp, esp
        sub     esp, PS*2	;2 local variables
        push    esi
        push    edi
        push    ebx
        mov     arg1, arg(1)
 %endmacro

 %macro FUNC_RESTORE 0
        pop     ebx
        pop     edi
        pop     esi
        add     esp, PS*2	;2 local variables
        pop     ebp
 %endmacro

%endif	; output formats

%define len       arg0
%define vec       arg1
%define mul_array arg2
%define src       arg3
%define dest1     arg4
%define ptr       arg5

%define vec_i     tmp2
%define dest2     tmp3
%define dest3     tmp4
%define pos       return

%ifidn PS,4				;32-bit code
 %define len_m   arg0_m
 %define src_m   arg3_m
 %define dest1_m arg4_m
 %define dest2_m tmp3_m
 %define dest3_m tmp4_m
%endif

%ifndef EC_ALIGNED_ADDR
;;; Use un-aligned load/store
 %define XLDR movdqu
 %define XSTR movdqu
%else
;;; Use non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR movdqa
  %define XSTR movdqa
 %else
  %define XLDR movntdqa
  %define XSTR movntdq
 %endif
%endif
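;;; EC_ALIGNED_ADDR asserts 16-byte-aligned buffers; with it set, NO_NT_LDST
;;; selects plain aligned moves, otherwise the non-temporal (cache-bypassing)
;;; forms are used.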

%ifidn PS,8				; 64-bit code
 default rel
 [bits 64]
%endif


section .text

%ifidn PS,8				;64-bit code
 %define xmask0f  xmm11
 %define xgft1_lo xmm2
 %define xgft1_hi xmm3
 %define xgft2_lo xmm4
 %define xgft2_hi xmm7
 %define xgft3_lo xmm6
 %define xgft3_hi xmm5

 %define x0    xmm0
 %define xtmpa xmm1
 %define xp1   xmm10
 %define xp2   xmm9
 %define xp3   xmm8
%else
 %define xmask0f  xmm7
 %define xgft1_lo xmm6
 %define xgft1_hi xmm5
 %define xgft2_lo xgft1_lo
 %define xgft2_hi xgft1_hi
 %define xgft3_lo xgft1_lo
 %define xgft3_hi xgft1_hi

 %define x0    xmm0
 %define xtmpa xmm1
 %define xp1   xmm2
 %define xp2   xmm3
 %define xp3   xmm4
%endif
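;; 32-bit mode has only xmm0-xmm7, so the second and third table pairs alias
;; the first and are reloaded from memory at each stage of the inner loop.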

align 16
global gf_3vect_dot_prod_sse:function
func(gf_3vect_dot_prod_sse)
        FUNC_SAVE
        SLDR    len, len_m
        sub     len, 16
        SSTR    len_m, len
        jl      .return_fail
        xor     pos, pos
        movdqa  xmask0f, [mask0f]	;Load mask of lower nibble in each byte
        sal     vec, LOG_PS		;vec *= PS. Make vec_i count by PS
        SLDR    dest1, dest1_m
        mov     dest2, [dest1+PS]
        SSTR    dest2_m, dest2
        mov     dest3, [dest1+2*PS]
        SSTR    dest3_m, dest3
        mov     dest1, [dest1]
        SSTR    dest1_m, dest1

.loop16:
        pxor    xp1, xp1
        pxor    xp2, xp2
        pxor    xp3, xp3
        mov     tmp, mul_array
        xor     vec_i, vec_i

.next_vect:
        SLDR    src, src_m
        mov     ptr, [src+vec_i]

        movdqu  xgft1_lo, [tmp]		;Load array Ax{00}, Ax{01}, ..., Ax{0f}
        movdqu  xgft1_hi, [tmp+16]	; "     Ax{00}, Ax{10}, ..., Ax{f0}
 %ifidn PS,8				;64-bit code
        movdqu  xgft2_lo, [tmp+vec*(32/PS)]	;Load array Bx{00}, Bx{01}, ..., Bx{0f}
        movdqu  xgft2_hi, [tmp+vec*(32/PS)+16]	; "     Bx{00}, Bx{10}, ..., Bx{f0}
        movdqu  xgft3_lo, [tmp+vec*(64/PS)]	;Load array Cx{00}, Cx{01}, ..., Cx{0f}
        movdqu  xgft3_hi, [tmp+vec*(64/PS)+16]	; "     Cx{00}, Cx{10}, ..., Cx{f0}
        add     tmp, 32
        add     vec_i, PS
 %endif
        XLDR    x0, [ptr+pos]		;Get next source vector

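        ;; GF(2^8) multiply via table lookup: split each source byte into
        ;; its low and high nibbles, use each nibble as a pshufb index into
        ;; a 16-entry product table, then XOR (GF add) the two halves.
        ;; psraw is a word-wide arithmetic shift, but the pand below throws
        ;; away any bits smeared across byte boundaries, so it is safe here.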
        movdqa  xtmpa, x0		;Keep unshifted copy of src
        psraw   x0, 4			;Shift to put high nibble into bits 3-0
        pand    x0, xmask0f		;Mask high src nibble in bits 3-0
        pand    xtmpa, xmask0f		;Mask low src nibble in bits 3-0

        pshufb  xgft1_hi, x0		;Lookup mul table of high nibble
        pshufb  xgft1_lo, xtmpa		;Lookup mul table of low nibble
        pxor    xgft1_hi, xgft1_lo	;GF add high and low partials
        pxor    xp1, xgft1_hi		;xp1 += partial

 %ifidn PS,4				;32-bit code
        movdqu  xgft2_lo, [tmp+vec*(32/PS)]	;Load array Bx{00}, Bx{01}, ..., Bx{0f}
        movdqu  xgft2_hi, [tmp+vec*(32/PS)+16]	; "     Bx{00}, Bx{10}, ..., Bx{f0}
 %endif
        pshufb  xgft2_hi, x0		;Lookup mul table of high nibble
        pshufb  xgft2_lo, xtmpa		;Lookup mul table of low nibble
        pxor    xgft2_hi, xgft2_lo	;GF add high and low partials
        pxor    xp2, xgft2_hi		;xp2 += partial

 %ifidn PS,4				;32-bit code
        sal     vec, 1
        movdqu  xgft3_lo, [tmp+vec*(32/PS)]	;Load array Cx{00}, Cx{01}, ..., Cx{0f}
        movdqu  xgft3_hi, [tmp+vec*(32/PS)+16]	; "     Cx{00}, Cx{10}, ..., Cx{f0}
        sar     vec, 1
        add     tmp, 32
        add     vec_i, PS
 %endif
        pshufb  xgft3_hi, x0		;Lookup mul table of high nibble
        pshufb  xgft3_lo, xtmpa		;Lookup mul table of low nibble
        pxor    xgft3_hi, xgft3_lo	;GF add high and low partials
        pxor    xp3, xgft3_hi		;xp3 += partial

        cmp     vec_i, vec
        jl      .next_vect

        SLDR    dest1, dest1_m
        SLDR    dest2, dest2_m
        XSTR    [dest1+pos], xp1
        XSTR    [dest2+pos], xp2
        SLDR    dest3, dest3_m
        XSTR    [dest3+pos], xp3

        SLDR    len, len_m
        add     pos, 16			;Loop on 16 bytes at a time
        cmp     pos, len
        jle     .loop16

        lea     tmp, [len + 16]
        cmp     pos, tmp
        je      .return_pass

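        ;; len was pre-decremented by 16, so reaching here means the buffer
        ;; length is not a multiple of 16. Rewind pos to length-16 and run
        ;; one last 16-byte pass; the overlapped bytes are recomputed to the
        ;; same values, so rewriting them is harmless.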
        ;; Tail len
        mov     pos, len		;Overlapped offset length-16
        jmp     .loop16			;Do one more overlap pass

.return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret

.return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame

section .data

align 16
mask0f: ddq 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f

;;;       func                 core, ver, snum
slversion gf_3vect_dot_prod_sse, 00, 06, 0063