;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
29 | ||
30 | ;;; | |
31 | ;;; gf_6vect_dot_prod_avx2(len, vec, *g_tbls, **buffs, **dests); | |
32 | ;;; | |
33 | ||
%include "reg_sizes.asm"

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9

 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define tmp2   r10
 %define tmp3   r13		; must be saved and restored
 %define tmp4   r12		; must be saved and restored
 %define tmp5   r14		; must be saved and restored
 %define tmp6   r15		; must be saved and restored
 %define return rax
 %define PS     8
 %define LOG_PS 3

 %define func(x) x:
 %macro FUNC_SAVE 0
	push	r12
	push	r13
	push	r14
	push	r15
 %endmacro
 %macro FUNC_RESTORE 0
	pop	r15
	pop	r14
	pop	r13
	pop	r12
 %endmacro
%endif
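
; System V AMD64 ABI: of the registers used here only r12-r15 are
; callee-saved (all ymm registers are volatile), so FUNC_SAVE and
; FUNC_RESTORE only need to preserve those four GPRs.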
70 | ||
71 | %ifidn __OUTPUT_FORMAT__, win64 | |
72 | %define arg0 rcx | |
73 | %define arg1 rdx | |
74 | %define arg2 r8 | |
75 | %define arg3 r9 | |
76 | ||
77 | %define arg4 r12 ; must be saved, loaded and restored | |
78 | %define arg5 r15 ; must be saved and restored | |
79 | %define tmp r11 | |
80 | %define tmp.w r11d | |
81 | %define tmp.b r11b | |
82 | %define tmp2 r10 | |
83 | %define tmp3 r13 ; must be saved and restored | |
84 | %define tmp4 r14 ; must be saved and restored | |
85 | %define tmp5 rdi ; must be saved and restored | |
86 | %define tmp6 rsi ; must be saved and restored | |
87 | %define return rax | |
88 | %define PS 8 | |
89 | %define LOG_PS 3 | |
90 | %define stack_size 10*16 + 7*8 ; must be an odd multiple of 8 | |
91 | %define arg(x) [rsp + stack_size + PS + PS*x] | |
92 | ||
 %define func(x) proc_frame x
 %macro FUNC_SAVE 0
	alloc_stack	stack_size
	vmovdqa	[rsp + 0*16], xmm6
	vmovdqa	[rsp + 1*16], xmm7
	vmovdqa	[rsp + 2*16], xmm8
	vmovdqa	[rsp + 3*16], xmm9
	vmovdqa	[rsp + 4*16], xmm10
	vmovdqa	[rsp + 5*16], xmm11
	vmovdqa	[rsp + 6*16], xmm12
	vmovdqa	[rsp + 7*16], xmm13
	vmovdqa	[rsp + 8*16], xmm14
	vmovdqa	[rsp + 9*16], xmm15
	save_reg	r12,  10*16 + 0*8
	save_reg	r13,  10*16 + 1*8
	save_reg	r14,  10*16 + 2*8
	save_reg	r15,  10*16 + 3*8
	save_reg	rdi,  10*16 + 4*8
	save_reg	rsi,  10*16 + 5*8
	end_prolog
	mov	arg4, arg(4)
 %endmacro
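
 ; Win64 ABI: xmm6-xmm15 and r12-r15, rdi, rsi are callee-saved, so the
 ; prologue above spills all of them to the frame; the fifth argument
 ; (arg4) arrives on the stack and is loaded into r12 after end_prolog.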
115 | ||
116 | %macro FUNC_RESTORE 0 | |
117 | vmovdqa xmm6, [rsp + 0*16] | |
118 | vmovdqa xmm7, [rsp + 1*16] | |
119 | vmovdqa xmm8, [rsp + 2*16] | |
120 | vmovdqa xmm9, [rsp + 3*16] | |
121 | vmovdqa xmm10, [rsp + 4*16] | |
122 | vmovdqa xmm11, [rsp + 5*16] | |
123 | vmovdqa xmm12, [rsp + 6*16] | |
124 | vmovdqa xmm13, [rsp + 7*16] | |
125 | vmovdqa xmm14, [rsp + 8*16] | |
126 | vmovdqa xmm15, [rsp + 9*16] | |
127 | mov r12, [rsp + 10*16 + 0*8] | |
128 | mov r13, [rsp + 10*16 + 1*8] | |
129 | mov r14, [rsp + 10*16 + 2*8] | |
130 | mov r15, [rsp + 10*16 + 3*8] | |
131 | mov rdi, [rsp + 10*16 + 4*8] | |
132 | mov rsi, [rsp + 10*16 + 5*8] | |
133 | add rsp, stack_size | |
134 | %endmacro | |
135 | %endif | |
136 | ||
137 | %define len arg0 | |
138 | %define vec arg1 | |
139 | %define mul_array arg2 | |
140 | %define src arg3 | |
141 | %define dest arg4 | |
142 | %define ptr arg5 | |
143 | %define vec_i tmp2 | |
144 | %define dest1 tmp3 | |
145 | %define dest2 tmp4 | |
146 | %define vskip1 tmp5 | |
147 | %define vskip3 tmp6 | |
148 | %define pos return | |
149 | ||
150 | ||
151 | %ifndef EC_ALIGNED_ADDR | |
152 | ;;; Use Un-aligned load/store | |
153 | %define XLDR vmovdqu | |
154 | %define XSTR vmovdqu | |
155 | %else | |
156 | ;;; Use Non-temporal load/stor | |
157 | %ifdef NO_NT_LDST | |
158 | %define XLDR vmovdqa | |
159 | %define XSTR vmovdqa | |
160 | %else | |
161 | %define XLDR vmovntdqa | |
162 | %define XSTR vmovntdq | |
163 | %endif | |
164 | %endif | |
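
; Note: the non-temporal forms (vmovntdqa/vmovntdq) require their memory
; operands to be 32-byte aligned, which is why they are only selected
; when EC_ALIGNED_ADDR is defined.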
165 | ||
166 | ||
167 | default rel | |
168 | ||
169 | [bits 64] | |
170 | section .text | |
171 | ||
172 | %define xmask0f ymm15 | |
173 | %define xmask0fx xmm15 | |
174 | %define xgft1_lo ymm14 | |
175 | %define xgft1_hi ymm13 | |
176 | %define xgft2_lo ymm12 | |
177 | %define xgft2_hi ymm11 | |
178 | %define xgft3_lo ymm10 | |
179 | %define xgft3_hi ymm9 | |
180 | %define x0 ymm0 | |
181 | %define xtmpa ymm1 | |
182 | %define xp1 ymm2 | |
183 | %define xp2 ymm3 | |
184 | %define xp3 ymm4 | |
185 | %define xp4 ymm5 | |
186 | %define xp5 ymm6 | |
187 | %define xp6 ymm7 | |
188 | ||
align 16
global gf_6vect_dot_prod_avx2:function
func(gf_6vect_dot_prod_avx2)
	FUNC_SAVE
	sub	len, 32
	jl	.return_fail
	xor	pos, pos
	mov	tmp.b, 0x0f
	vpinsrb	xmask0fx, xmask0fx, tmp.w, 0
	vpbroadcastb xmask0f, xmask0fx	;Construct mask 0x0f0f0f...
	mov	vskip1, vec
	imul	vskip1, 32
	mov	vskip3, vec
	imul	vskip3, 96
	sal	vec, LOG_PS		;vec *= PS. Make vec_i count by PS
	mov	dest1, [dest]
	mov	dest2, [dest+PS]


.loop32:
	mov	tmp, mul_array
	xor	vec_i, vec_i
	vpxor	xp1, xp1
	vpxor	xp2, xp2
	vpxor	xp3, xp3
	vpxor	xp4, xp4
	vpxor	xp5, xp5
	vpxor	xp6, xp6

.next_vect:
	mov	ptr, [src+vec_i]
	XLDR	x0, [ptr+pos]		;Get next source vector
	add	vec_i, PS

	vpand	xgft3_lo, x0, xmask0f	;Mask low src nibble in bits 4-0
	vpsraw	x0, x0, 4		;Shift to put high nibble into bits 4-0
	vpand	x0, x0, xmask0f		;Mask high src nibble in bits 4-0
	vperm2i128 xtmpa, xgft3_lo, x0, 0x30	;swap xtmpa from 1lo|2lo to 1lo|2hi
	vperm2i128 x0, xgft3_lo, x0, 0x12	;swap x0 from 1hi|2hi to 1hi|2lo

	vmovdqu	xgft1_lo, [tmp]			;Load array Ax{00}, Ax{01}, ..., Ax{0f}
						;     "     Ax{00}, Ax{10}, ..., Ax{f0}
	vmovdqu	xgft2_lo, [tmp+vskip1*1]	;Load array Bx{00}, Bx{01}, ..., Bx{0f}
						;     "     Bx{00}, Bx{10}, ..., Bx{f0}
	vmovdqu	xgft3_lo, [tmp+vskip1*2]	;Load array Cx{00}, Cx{01}, ..., Cx{0f}
						;     "     Cx{00}, Cx{10}, ..., Cx{f0}
	lea	ptr, [vskip1 + vskip1*4]	;ptr = vskip5

	vperm2i128 xgft1_hi, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
	vperm2i128 xgft2_hi, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo
	vperm2i128 xgft3_hi, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo

	vpshufb	xgft1_hi, x0		;Lookup mul table of high nibble
	vpshufb	xgft1_lo, xtmpa		;Lookup mul table of low nibble
	vpxor	xgft1_hi, xgft1_lo	;GF add high and low partials
	vpxor	xp1, xgft1_hi		;xp1 += partial

	vpshufb	xgft2_hi, x0		;Lookup mul table of high nibble
	vpshufb	xgft2_lo, xtmpa		;Lookup mul table of low nibble
	vpxor	xgft2_hi, xgft2_lo	;GF add high and low partials
	vpxor	xp2, xgft2_hi		;xp2 += partial

	vpshufb	xgft3_hi, x0		;Lookup mul table of high nibble
	vpshufb	xgft3_lo, xtmpa		;Lookup mul table of low nibble
	vpxor	xgft3_hi, xgft3_lo	;GF add high and low partials
	vpxor	xp3, xgft3_hi		;xp3 += partial


	vmovdqu	xgft1_lo, [tmp+vskip3]		;Load array Dx{00}, Dx{01}, ..., Dx{0f}
						;     "     Dx{00}, Dx{10}, ..., Dx{f0}
	vmovdqu	xgft2_lo, [tmp+vskip1*4]	;Load array Ex{00}, Ex{01}, ..., Ex{0f}
						;     "     Ex{00}, Ex{10}, ..., Ex{f0}
	vmovdqu	xgft3_lo, [tmp+ptr]		;Load array Fx{00}, Fx{01}, ..., Fx{0f}
						;     "     Fx{00}, Fx{10}, ..., Fx{f0}
	add	tmp, 32
	vperm2i128 xgft1_hi, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
	vperm2i128 xgft2_hi, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo
	vperm2i128 xgft3_hi, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo

	vpshufb	xgft1_hi, x0		;Lookup mul table of high nibble
	vpshufb	xgft1_lo, xtmpa		;Lookup mul table of low nibble
	vpxor	xgft1_hi, xgft1_lo	;GF add high and low partials
	vpxor	xp4, xgft1_hi		;xp4 += partial

	vpshufb	xgft2_hi, x0		;Lookup mul table of high nibble
	vpshufb	xgft2_lo, xtmpa		;Lookup mul table of low nibble
	vpxor	xgft2_hi, xgft2_lo	;GF add high and low partials
	vpxor	xp5, xgft2_hi		;xp5 += partial

	vpshufb	xgft3_hi, x0		;Lookup mul table of high nibble
	vpshufb	xgft3_lo, xtmpa		;Lookup mul table of low nibble
	vpxor	xgft3_hi, xgft3_lo	;GF add high and low partials
	vpxor	xp6, xgft3_hi		;xp6 += partial

	cmp	vec_i, vec
	jl	.next_vect


	mov	tmp, [dest+2*PS]
	mov	ptr, [dest+3*PS]
	mov	vec_i, [dest+4*PS]

	XSTR	[dest1+pos], xp1
	XSTR	[dest2+pos], xp2
	XSTR	[tmp+pos], xp3
	mov	tmp, [dest+5*PS]
	XSTR	[ptr+pos], xp4
	XSTR	[vec_i+pos], xp5
	XSTR	[tmp+pos], xp6

	add	pos, 32			;Loop on 32 bytes at a time
	cmp	pos, len
	jle	.loop32

	lea	tmp, [len + 32]
	cmp	pos, tmp
	je	.return_pass

	;; Tail len
	mov	pos, len		;Overlapped offset length-32
	jmp	.loop32			;Do one more overlap pass
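	;; Rewinding pos to len (original length minus 32) redoes the
	;; last full 32-byte block. The store overlaps bytes written in
	;; the previous iteration, which is harmless because the
	;; recomputed values are identical.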
310 | ||
311 | .return_pass: | |
312 | FUNC_RESTORE | |
313 | mov return, 0 | |
314 | ret | |
315 | ||
316 | .return_fail: | |
317 | FUNC_RESTORE | |
318 | mov return, 1 | |
319 | ret | |
320 | ||
321 | endproc_frame | |
322 | ||
323 | section .data | |
324 | ||
325 | ;;; func core, ver, snum | |
326 | slversion gf_6vect_dot_prod_avx2, 04, 04, 019a |