;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_6vect_dot_prod_avx(len, vec, *g_tbls, **buffs, **dests);
;;;

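;;; Computes six GF(2^8) dot products over 'vec' source buffers:
;;;   dests[m][i] = XOR over j = 0..vec-1 of gf_mul(coef[m][j], buffs[j][i]),  m = 0..5
;;; Each coefficient is pre-expanded in g_tbls into a 32-byte pair of
;;; 16-entry nibble lookup tables, so one multiply costs two vpshufb
;;; lookups plus a vpxor. A hedged scalar C sketch of the same result
;;; (gf_mul_nibble_tbl is illustrative, not part of this library):
;;;
;;;   for (int m = 0; m < 6; m++)
;;;       for (int i = 0; i < len; i++) {
;;;           unsigned char s = 0;
;;;           for (int j = 0; j < vec; j++)
;;;               s ^= gf_mul_nibble_tbl(&g_tbls[32 * (m * vec + j)], buffs[j][i]);
;;;           dests[m][i] = s;
;;;       }
;;;
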
%include "reg_sizes.asm"

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9

 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r13		; must be saved and restored
 %define tmp4   r12		; must be saved and restored
 %define tmp5   r14		; must be saved and restored
 %define tmp6   r15		; must be saved and restored
 %define return rax
 %define PS     8
 %define LOG_PS 3

 %define func(x) x:
 %macro FUNC_SAVE 0
	push	r12
	push	r13
	push	r14
	push	r15
 %endmacro
 %macro FUNC_RESTORE 0
	pop	r15
	pop	r14
	pop	r13
	pop	r12
 %endmacro
%endif

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9

 %define arg4   r12		; must be saved, loaded and restored
 %define arg5   r15		; must be saved and restored
 %define tmp    r11
 %define tmp2   r10
 %define tmp3   r13		; must be saved and restored
 %define tmp4   r14		; must be saved and restored
 %define tmp5   rdi		; must be saved and restored
 %define tmp6   rsi		; must be saved and restored
 %define return rax
 %define PS     8
 %define LOG_PS 3
 %define stack_size 10*16 + 7*8	; must be an odd multiple of 8 so that
				; alloc_stack leaves rsp 16-byte aligned
				; (rsp is 8 mod 16 at function entry)
 %define arg(x)     [rsp + stack_size + PS + PS*x]

 %define func(x) proc_frame x
 %macro FUNC_SAVE 0
	alloc_stack	stack_size
	save_xmm128	xmm6, 0*16
	save_xmm128	xmm7, 1*16
	save_xmm128	xmm8, 2*16
	save_xmm128	xmm9, 3*16
	save_xmm128	xmm10, 4*16
	save_xmm128	xmm11, 5*16
	save_xmm128	xmm12, 6*16
	save_xmm128	xmm13, 7*16
	save_xmm128	xmm14, 8*16
	save_xmm128	xmm15, 9*16
	save_reg	r12, 10*16 + 0*8
	save_reg	r13, 10*16 + 1*8
	save_reg	r14, 10*16 + 2*8
	save_reg	r15, 10*16 + 3*8
	save_reg	rdi, 10*16 + 4*8
	save_reg	rsi, 10*16 + 5*8
	end_prolog
	mov	arg4, arg(4)
 %endmacro

 %macro FUNC_RESTORE 0
	vmovdqa	xmm6, [rsp + 0*16]
	vmovdqa	xmm7, [rsp + 1*16]
	vmovdqa	xmm8, [rsp + 2*16]
	vmovdqa	xmm9, [rsp + 3*16]
	vmovdqa	xmm10, [rsp + 4*16]
	vmovdqa	xmm11, [rsp + 5*16]
	vmovdqa	xmm12, [rsp + 6*16]
	vmovdqa	xmm13, [rsp + 7*16]
	vmovdqa	xmm14, [rsp + 8*16]
	vmovdqa	xmm15, [rsp + 9*16]
	mov	r12, [rsp + 10*16 + 0*8]
	mov	r13, [rsp + 10*16 + 1*8]
	mov	r14, [rsp + 10*16 + 2*8]
	mov	r15, [rsp + 10*16 + 3*8]
	mov	rdi, [rsp + 10*16 + 4*8]
	mov	rsi, [rsp + 10*16 + 5*8]
	add	rsp, stack_size
 %endmacro
%endif

%define len       arg0
%define vec       arg1
%define mul_array arg2
%define src       arg3
%define dest      arg4
%define ptr       arg5
%define vec_i     tmp2
%define dest1     tmp3
%define dest2     tmp4
%define vskip1    tmp5
%define vskip3    tmp6
%define pos       return


%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Use aligned (and, unless NO_NT_LDST, non-temporal) load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
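;;; The access flavor is fixed at assembly time by build defines; for
;;; example (an illustrative NASM invocation, not taken from this repo's
;;; build files):
;;;   nasm -f elf64 -DEC_ALIGNED_ADDR gf_6vect_dot_prod_avx.asm.s
;;; enables the aligned, non-temporal path above.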


default rel

[bits 64]
section .text

%define xmask0f  xmm15
%define xgft1_lo xmm14
%define xgft1_hi xmm13
%define xgft2_lo xmm12
%define xgft2_hi xmm11
%define xgft3_lo xmm10
%define xgft3_hi xmm9
%define x0    xmm0
%define xtmpa xmm1
%define xp1   xmm2
%define xp2   xmm3
%define xp3   xmm4
%define xp4   xmm5
%define xp5   xmm6
%define xp6   xmm7

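;;; The xgft* registers hold the current 32-byte lookup tables and are
;;; reloaded for every source vector; xp1-xp6 accumulate the six output
;;; vectors across the .next_vect loop below.
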
align 16
global gf_6vect_dot_prod_avx:function
func(gf_6vect_dot_prod_avx)
	FUNC_SAVE
	sub	len, 16
	jl	.return_fail
	xor	pos, pos
	vmovdqa	xmask0f, [mask0f]	;Load mask of lower nibble in each byte
	mov	vskip1, vec
	imul	vskip1, 32		;vskip1 = vec*32: table bytes per output row
	mov	vskip3, vec
	imul	vskip3, 96		;vskip3 = vec*96 = 3*vskip1: offset of 4th row (D)
	sal	vec, LOG_PS		;vec *= PS. Make vec_i count by PS
	mov	dest1, [dest]		;Cache first two dest pointers in
	mov	dest2, [dest+PS]	; callee-saved regs for the inner loop


.loop16:
	mov	tmp, mul_array
	xor	vec_i, vec_i
	vpxor	xp1, xp1
	vpxor	xp2, xp2
	vpxor	xp3, xp3
	vpxor	xp4, xp4
	vpxor	xp5, xp5
	vpxor	xp6, xp6

.next_vect:
	mov	ptr, [src+vec_i]
	add	vec_i, PS
	XLDR	x0, [ptr+pos]		;Get next source vector

	vmovdqu	xgft1_lo, [tmp]			;Load array Ax{00}, Ax{01}, ..., Ax{0f}
	vmovdqu	xgft1_hi, [tmp+16]		; "     Ax{00}, Ax{10}, ..., Ax{f0}
	vmovdqu	xgft2_lo, [tmp+vskip1*1]	;Load array Bx{00}, Bx{01}, ..., Bx{0f}
	vmovdqu	xgft2_hi, [tmp+vskip1*1+16]	; "     Bx{00}, Bx{10}, ..., Bx{f0}
	vmovdqu	xgft3_lo, [tmp+vskip1*2]	;Load array Cx{00}, Cx{01}, ..., Cx{0f}
	vmovdqu	xgft3_hi, [tmp+vskip1*2+16]	; "     Cx{00}, Cx{10}, ..., Cx{f0}
	lea	ptr, [vskip1 + vskip1*4]	;ptr = vskip5

	vpand	xtmpa, x0, xmask0f	;Mask low src nibble in bits 4-0
	vpsraw	x0, x0, 4		;Shift to put high nibble into bits 4-0
	vpand	x0, x0, xmask0f		;Mask high src nibble in bits 4-0
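	;; Each source byte b is multiplied by a GF(2^8) constant as
	;; lo_tbl[b & 0x0f] XOR hi_tbl[b >> 4] via vpshufb. vpsraw shifts
	;; 16-bit lanes, so bits leaking in from the neighboring byte are
	;; cleared by the vpand with xmask0f above.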


	vpshufb	xgft1_hi, x0		;Lookup mul table of high nibble
	vpshufb	xgft1_lo, xtmpa		;Lookup mul table of low nibble
	vpxor	xgft1_hi, xgft1_lo	;GF add high and low partials
	vpxor	xp1, xgft1_hi		;xp1 += partial

	vpshufb	xgft2_hi, x0		;Lookup mul table of high nibble
	vpshufb	xgft2_lo, xtmpa		;Lookup mul table of low nibble
	vpxor	xgft2_hi, xgft2_lo	;GF add high and low partials
	vpxor	xp2, xgft2_hi		;xp2 += partial

	vpshufb	xgft3_hi, x0		;Lookup mul table of high nibble
	vpshufb	xgft3_lo, xtmpa		;Lookup mul table of low nibble
	vpxor	xgft3_hi, xgft3_lo	;GF add high and low partials
	vpxor	xp3, xgft3_hi		;xp3 += partial


	vmovdqu	xgft1_lo, [tmp+vskip3]		;Load array Dx{00}, Dx{01}, ..., Dx{0f}
	vmovdqu	xgft1_hi, [tmp+vskip3+16]	; "     Dx{00}, Dx{10}, ..., Dx{f0}
	vmovdqu	xgft2_lo, [tmp+vskip1*4]	;Load array Ex{00}, Ex{01}, ..., Ex{0f}
	vmovdqu	xgft2_hi, [tmp+vskip1*4+16]	; "     Ex{00}, Ex{10}, ..., Ex{f0}
	vmovdqu	xgft3_lo, [tmp+ptr]		;Load array Fx{00}, Fx{01}, ..., Fx{0f}
	vmovdqu	xgft3_hi, [tmp+ptr+16]		; "     Fx{00}, Fx{10}, ..., Fx{f0}
	add	tmp, 32


	vpshufb	xgft1_hi, x0		;Lookup mul table of high nibble
	vpshufb	xgft1_lo, xtmpa		;Lookup mul table of low nibble
	vpxor	xgft1_hi, xgft1_lo	;GF add high and low partials
	vpxor	xp4, xgft1_hi		;xp4 += partial

	vpshufb	xgft2_hi, x0		;Lookup mul table of high nibble
	vpshufb	xgft2_lo, xtmpa		;Lookup mul table of low nibble
	vpxor	xgft2_hi, xgft2_lo	;GF add high and low partials
	vpxor	xp5, xgft2_hi		;xp5 += partial

	vpshufb	xgft3_hi, x0		;Lookup mul table of high nibble
	vpshufb	xgft3_lo, xtmpa		;Lookup mul table of low nibble
	vpxor	xgft3_hi, xgft3_lo	;GF add high and low partials
	vpxor	xp6, xgft3_hi		;xp6 += partial

	cmp	vec_i, vec
	jl	.next_vect

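	;; All vec source vectors are accumulated; fetch the remaining four
	;; dest pointers (dest1/dest2 were cached before the loop) and store
	;; the six 16-byte results.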
	mov	tmp, [dest+2*PS]
	mov	ptr, [dest+3*PS]
	mov	vec_i, [dest+4*PS]

	XSTR	[dest1+pos], xp1
	XSTR	[dest2+pos], xp2
	XSTR	[tmp+pos], xp3
	mov	tmp, [dest+5*PS]
	XSTR	[ptr+pos], xp4
	XSTR	[vec_i+pos], xp5
	XSTR	[tmp+pos], xp6

	add	pos, 16			;Loop on 16 bytes at a time
	cmp	pos, len
	jle	.loop16

	lea	tmp, [len + 16]
	cmp	pos, tmp
	je	.return_pass

	;; Tail len
	mov	pos, len		;Overlapped offset length-16
	jmp	.loop16			;Do one more overlap pass
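	;; If len was not a multiple of 16, the final pass rewinds pos to
	;; len (already reduced by 16 at entry) and reruns .loop16 on the
	;; last, overlapping 16-byte window. Re-storing bytes the previous
	;; iteration already wrote is harmless: each output byte depends
	;; only on the source bytes at the same offset.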

.return_pass:
	FUNC_RESTORE
	mov	return, 0
	ret

.return_fail:
	FUNC_RESTORE
	mov	return, 1
	ret

endproc_frame

section .data

align 16
mask0f:	ddq 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f

;;;       func             core, ver, snum
slversion gf_6vect_dot_prod_avx, 02, 04, 0195