;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_2vect_dot_prod_avx512(len, vec, *g_tbls, **buffs, **dests);
;;;
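;;;
;;; The matching C prototype (a sketch based on the ISA-L gf_vect_dot_prod
;;; family of routines, not taken from this file) is roughly:
;;;   void gf_2vect_dot_prod_avx512(int len, int vec, unsigned char *g_tbls,
;;;                                 unsigned char **buffs, unsigned char **dests);
;;; len    - length of each source/destination buffer in bytes (>= 64 here)
;;; vec    - number of source buffers
;;; g_tbls - GF(2^8) multiply tables, 32 bytes per (source, dest) pair
;;; buffs  - array of vec source buffer pointers
;;; dests  - array of two destination (parity) buffer pointers
;;; This entry point returns 0 in rax on success and 1 when len < 64.
;;;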

%include "reg_sizes.asm"

%ifdef HAVE_AS_KNOWS_AVX512

%ifidn __OUTPUT_FORMAT__, elf64
%define arg0   rdi
%define arg1   rsi
%define arg2   rdx
%define arg3   rcx
%define arg4   r8
%define arg5   r9

%define tmp    r11
%define tmp2   r10
%define tmp3   r12             ; must be saved and restored
%define return rax
%define PS     8
%define LOG_PS 3

%define func(x) x:
%macro FUNC_SAVE 0
        push    r12
%endmacro
%macro FUNC_RESTORE 0
        pop     r12
%endmacro
%endif

%ifidn __OUTPUT_FORMAT__, win64
%define arg0   rcx
%define arg1   rdx
%define arg2   r8
%define arg3   r9

%define arg4   r12             ; must be saved, loaded and restored
%define arg5   r15             ; must be saved and restored
%define tmp    r11
%define tmp2   r10
%define tmp3   r13             ; must be saved and restored
%define return rax
%define PS     8
%define LOG_PS 3
%define stack_size 9*16 + 5*8  ; must be an odd multiple of 8
%define arg(x)     [rsp + stack_size + PS + PS*x]
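;; arg(x) addresses the x-th argument's stack slot: above the local frame
;; (stack_size) and the return address (PS). Slots 0-3 are the 32-byte
;; shadow space for the register arguments, so arg(4) picks up the fifth
;; parameter, which the Windows x64 convention passes on the stack.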

%define func(x) proc_frame x
%macro FUNC_SAVE 0
        alloc_stack     stack_size
        vmovdqa         [rsp + 0*16], xmm6
        vmovdqa         [rsp + 1*16], xmm7
        vmovdqa         [rsp + 2*16], xmm8
        vmovdqa         [rsp + 3*16], xmm9
        vmovdqa         [rsp + 4*16], xmm10
        vmovdqa         [rsp + 5*16], xmm11
        vmovdqa         [rsp + 6*16], xmm12
        vmovdqa         [rsp + 7*16], xmm13
        vmovdqa         [rsp + 8*16], xmm14
        save_reg        r12, 9*16 + 0*8
        save_reg        r13, 9*16 + 1*8
        save_reg        r14, 9*16 + 2*8
        save_reg        r15, 9*16 + 3*8
        end_prolog
        mov             arg4, arg(4)
%endmacro

%macro FUNC_RESTORE 0
        vmovdqa         xmm6, [rsp + 0*16]
        vmovdqa         xmm7, [rsp + 1*16]
        vmovdqa         xmm8, [rsp + 2*16]
        vmovdqa         xmm9, [rsp + 3*16]
        vmovdqa         xmm10, [rsp + 4*16]
        vmovdqa         xmm11, [rsp + 5*16]
        vmovdqa         xmm12, [rsp + 6*16]
        vmovdqa         xmm13, [rsp + 7*16]
        vmovdqa         xmm14, [rsp + 8*16]
        mov             r12, [rsp + 9*16 + 0*8]
        mov             r13, [rsp + 9*16 + 1*8]
        mov             r14, [rsp + 9*16 + 2*8]
        mov             r15, [rsp + 9*16 + 3*8]
        add             rsp, stack_size
%endmacro
%endif


%define len       arg0
%define vec       arg1
%define mul_array arg2
%define src       arg3
%define dest1     arg4
%define ptr       arg5
%define vec_i     tmp2
%define dest2     tmp3
%define pos       return


%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
%define XLDR vmovdqu8
%define XSTR vmovdqu8
%else
;;; Use non-temporal load/store
%ifdef NO_NT_LDST
%define XLDR vmovdqa
%define XSTR vmovdqa
%else
%define XLDR vmovntdqa
%define XSTR vmovntdq
%endif
%endif
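;; With EC_ALIGNED_ADDR defined, buffers are assumed 64-byte aligned and,
;; unless NO_NT_LDST is also set, non-temporal (streaming) loads/stores are
;; used so the one-pass bulk data does not displace useful cache lines.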

%define xmask0f   zmm8
%define xgft1_lo  zmm7
%define xgft1_loy ymm7
%define xgft1_hi  zmm6
%define xgft2_lo  zmm5
%define xgft2_loy ymm5
%define xgft2_hi  zmm4

%define x0    zmm0
%define xtmpa zmm1
%define xp1   zmm2
%define xp2   zmm3

default rel
[bits 64]

section .text

align 16
global gf_2vect_dot_prod_avx512:function
func(gf_2vect_dot_prod_avx512)
        FUNC_SAVE
        sub     len, 64
        jl      .return_fail

        xor     pos, pos
        mov     tmp, 0x0f
        vpbroadcastb xmask0f, tmp       ;Construct mask 0x0f0f0f...
        sal     vec, LOG_PS             ;vec *= PS. Make vec_i count by PS
        mov     dest2, [dest1+PS]
        mov     dest1, [dest1]
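        ;; dest1 initially holds the dests pointer array; the second
        ;; destination pointer is read first, then dest1 is overwritten
        ;; with dests[0].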

.loop64:
        vpxorq  xp1, xp1, xp1
        vpxorq  xp2, xp2, xp2
        mov     tmp, mul_array
        xor     vec_i, vec_i

.next_vect:
        mov     ptr, [src+vec_i]
        XLDR    x0, [ptr+pos]           ;Get next source vector
        add     vec_i, PS

        vpandq  xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpandq  x0, x0, xmask0f         ;Mask high src nibble in bits 4-0
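        ;; GF(2^8) multiply via nibble lookups: each source byte is split
        ;; into its low and high 4-bit halves so vpshufb can serve as a
        ;; 16-entry table lookup per nibble; the two partial products are
        ;; combined with XOR (addition in GF(2^8)) below. vpsraw shifts
        ;; whole 16-bit words, so the following mask also clears the bits
        ;; that leak across byte boundaries.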

        vmovdqu8 xgft1_loy, [tmp]               ;Load array Ax{00}..{0f}, Ax{00}..{f0}
        vmovdqu8 xgft2_loy, [tmp+vec*(32/PS)]   ;Load array Bx{00}..{0f}, Bx{00}..{f0}
        add     tmp, 32
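        ;; mul_array holds 32 bytes of tables per (source, dest) pair: a
        ;; 16-byte low-nibble table followed by a 16-byte high-nibble table.
        ;; The tables for the second destination begin vec*32 bytes later;
        ;; vec was pre-scaled by PS above, so vec*(32/PS) is that byte offset.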

        vshufi64x2 xgft1_hi, xgft1_lo, xgft1_lo, 0x55
        vshufi64x2 xgft1_lo, xgft1_lo, xgft1_lo, 0x00
        vshufi64x2 xgft2_hi, xgft2_lo, xgft2_lo, 0x55
        vshufi64x2 xgft2_lo, xgft2_lo, xgft2_lo, 0x00
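        ;; The 32-byte ymm loads placed the low-nibble table in 128-bit lane 0
        ;; and the high-nibble table in lane 1; these shuffles broadcast each
        ;; 16-byte table into all four lanes of the zmm so vpshufb can look up
        ;; all 64 source bytes at once.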

        vpshufb xgft1_hi, xgft1_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft1_lo, xgft1_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft1_hi, xgft1_hi, xgft1_lo    ;GF add high and low partials
        vpxorq  xp1, xp1, xgft1_hi              ;xp1 += partial

        vpshufb xgft2_hi, xgft2_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xgft2_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft2_hi, xgft2_hi, xgft2_lo    ;GF add high and low partials
        vpxorq  xp2, xp2, xgft2_hi              ;xp2 += partial

        cmp     vec_i, vec
        jl      .next_vect

        XSTR    [dest1+pos], xp1
        XSTR    [dest2+pos], xp2

        add     pos, 64                 ;Loop on 64 bytes at a time
        cmp     pos, len
        jle     .loop64

        lea     tmp, [len + 64]
        cmp     pos, tmp
        je      .return_pass

        ;; Tail len
        mov     pos, len                ;Overlapped offset length-64
        jmp     .loop64                 ;Do one more overlap pass
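        ;; len was reduced by 64 at entry, so the main loop stops once fewer
        ;; than 64 bytes remain. Unless the length was an exact multiple of 64
        ;; (pos == len + 64), the remainder is handled by one extra pass at
        ;; pos = len, which recomputes an overlapping 64-byte block ending
        ;; exactly at the end of the buffers.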

.return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret

.return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame

%else
%ifidn __OUTPUT_FORMAT__, win64
global no_gf_2vect_dot_prod_avx512
no_gf_2vect_dot_prod_avx512:
%endif
%endif  ; ifdef HAVE_AS_KNOWS_AVX512