;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_3vect_mad_avx2(len, vec, vec_i, mul_array, src, dest);
;;;
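;;;
;;; Multiply-and-add for one source vector into three parity vectors.
;;; Roughly, as a scalar sketch (gf_mul and the coefficient array a[]
;;; are illustrative names, assuming the usual isa-l layout of one
;;; 32-byte nibble table per coefficient in mul_array):
;;;
;;;     for (j = 0; j < 3; j++)
;;;         for (i = 0; i < len; i++)
;;;             dest[j][i] ^= gf_mul(a[j * vec + vec_i], src[i]);
;;;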

%include "reg_sizes.asm"

%define PS 8

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg0.w ecx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9
 %define arg4   r12             ; must be saved, loaded and restored
 %define arg5   r15             ; must be saved and restored

 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define return rax
 %define return.w eax
 %define stack_size 16*10 + 3*8
 %define arg(x) [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x
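
 ;; Frame layout: stack_size = 16*10 + 3*8 = 184 bytes; the first 160
 ;; bytes hold xmm6-xmm15, the next 16 hold r12 and r15, and the odd
 ;; 8 bytes keep rsp 16-byte aligned for the vmovdqa saves. arg(x)
 ;; then skips this frame plus the return address (PS); since win64
 ;; slots 0-3 are the 32-byte shadow space, arg(4) is the 5th argument.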

%macro FUNC_SAVE 0
        sub     rsp, stack_size
        vmovdqa [rsp+16*0], xmm6
        vmovdqa [rsp+16*1], xmm7
        vmovdqa [rsp+16*2], xmm8
        vmovdqa [rsp+16*3], xmm9
        vmovdqa [rsp+16*4], xmm10
        vmovdqa [rsp+16*5], xmm11
        vmovdqa [rsp+16*6], xmm12
        vmovdqa [rsp+16*7], xmm13
        vmovdqa [rsp+16*8], xmm14
        vmovdqa [rsp+16*9], xmm15
        save_reg r12, 10*16 + 0*8
        save_reg r15, 10*16 + 1*8
        end_prolog
        mov     arg4, arg(4)
        mov     arg5, arg(5)
%endmacro

%macro FUNC_RESTORE 0
        vmovdqa xmm6, [rsp+16*0]
        vmovdqa xmm7, [rsp+16*1]
        vmovdqa xmm8, [rsp+16*2]
        vmovdqa xmm9, [rsp+16*3]
        vmovdqa xmm10, [rsp+16*4]
        vmovdqa xmm11, [rsp+16*5]
        vmovdqa xmm12, [rsp+16*6]
        vmovdqa xmm13, [rsp+16*7]
        vmovdqa xmm14, [rsp+16*8]
        vmovdqa xmm15, [rsp+16*9]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r15, [rsp + 10*16 + 1*8]
        add     rsp, stack_size
%endmacro

%elifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg0.w edi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9

 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define return rax
 %define return.w eax

 %define func(x) x:
 %define FUNC_SAVE
 %define FUNC_RESTORE
%endif

;;; gf_3vect_mad_avx2(len, vec, vec_i, mul_array, src, dest)
%define len       arg0
%define len.w     arg0.w
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest1     arg5
%define pos       return
%define pos.w     return.w

%define dest2 mul_array
%define dest3 vec_i

%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Use Non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif


default rel

[bits 64]
section .text

%define xmask0f   ymm15
%define xmask0fx  xmm15
%define xgft1_lo  ymm14
%define xgft1_hi  ymm13
%define xgft2_lo  ymm12
%define xgft3_lo  ymm11

%define x0      ymm0
%define xtmpa   ymm1
%define xtmph1  ymm2
%define xtmpl1  ymm3
%define xtmph2  ymm4
%define xtmpl2  ymm5
%define xtmpl2x xmm5
%define xtmph3  ymm6
%define xtmpl3  ymm7
%define xtmpl3x xmm7
%define xd1     ymm8
%define xd2     ymm9
%define xd3     ymm10

align 16
global gf_3vect_mad_avx2:function
func(gf_3vect_mad_avx2)
        FUNC_SAVE
        sub     len, 32                 ;Loop needs a full 32-byte block; fail if len < 32
        jl      .return_fail
        xor     pos, pos
        mov     tmp.b, 0x0f
        vpinsrb xmask0fx, xmask0fx, tmp.w, 0
        vpbroadcastb xmask0f, xmask0fx  ;Construct mask 0x0f0f0f...
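
        ;; GF(2^8) multiply is done as two 16-entry vpshufb lookups: each
        ;; source byte is split into low and high nibbles, the per-nibble
        ;; partial products are looked up, then xored, since for example
        ;; c * 0x35 = c * 0x30 xor c * 0x05 in GF(2^8). xmask0f isolates
        ;; each nibble before it is used as a shuffle index.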

        sal     vec_i, 5                ;Multiply by 32
        sal     vec, 5
        lea     tmp, [mul_array + vec_i]

        vmovdqu xgft1_lo, [tmp]         ;Load array Ax{00}, Ax{01}, ..., Ax{0f}
                                        ;     "     Ax{00}, Ax{10}, ..., Ax{f0}
        vperm2i128 xgft1_hi, xgft1_lo, xgft1_lo, 0x11 ; swapped to hi | hi
        vperm2i128 xgft1_lo, xgft1_lo, xgft1_lo, 0x00 ; swapped to lo | lo

        vmovdqu xgft2_lo, [tmp+vec]     ;Load array Bx{00}, Bx{01}, Bx{02}, ...
                                        ;     "     Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
        vmovdqu xgft3_lo, [tmp+2*vec]   ;Load array Cx{00}, Cx{01}, Cx{02}, ...
                                        ;     "     Cx{00}, Cx{10}, Cx{20}, ... , Cx{f0}
        mov     dest2, [dest1+PS]       ; reuse mul_array
        mov     dest3, [dest1+2*PS]     ; reuse vec_i
        mov     dest1, [dest1]

.loop32:
        XLDR    x0, [src+pos]           ;Get next source vector
        XLDR    xd1, [dest1+pos]        ;Get next dest vector
        XLDR    xd2, [dest2+pos]        ;Get next dest vector
        XLDR    xd3, [dest3+pos]        ;Get next dest vector
        vperm2i128 xtmph2, xgft2_lo, xgft2_lo, 0x11 ; swapped to hi | hi
        vperm2i128 xtmpl2, xgft2_lo, xgft2_lo, 0x00 ; swapped to lo | lo

        vperm2i128 xtmph3, xgft3_lo, xgft3_lo, 0x11 ; swapped to hi | hi
        vperm2i128 xtmpl3, xgft3_lo, xgft3_lo, 0x00 ; swapped to lo | lo
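        ;; Only table 1 stays expanded across iterations (xgft1_hi/xgft1_lo);
        ;; tables 2 and 3 are kept packed in one ymm each and re-expanded
        ;; here every pass, freeing two registers at the cost of four
        ;; vperm2i128 per 32-byte block.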

        vpand   xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0

        ; dest1
        vpshufb xtmph1, xgft1_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl1, xgft1_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph1, xtmph1, xtmpl1  ;GF add high and low partials
        vpxor   xd1, xd1, xtmph1        ;xd1 += partial

        ; dest2
        vpshufb xtmph2, x0              ;Lookup mul table of high nibble
        vpshufb xtmpl2, xtmpa           ;Lookup mul table of low nibble
        vpxor   xtmph2, xtmpl2          ;GF add high and low partials
        vpxor   xd2, xtmph2             ;xd2 += partial

        ; dest3
        vpshufb xtmph3, x0              ;Lookup mul table of high nibble
        vpshufb xtmpl3, xtmpa           ;Lookup mul table of low nibble
        vpxor   xtmph3, xtmpl3          ;GF add high and low partials
        vpxor   xd3, xtmph3             ;xd3 += partial

        XSTR    [dest1+pos], xd1
        XSTR    [dest2+pos], xd2
        XSTR    [dest3+pos], xd3

        add     pos, 32                 ;Loop on 32 bytes at a time
        cmp     pos, len
        jle     .loop32

        lea     tmp, [len + 32]         ;Restore the true length
        cmp     pos, tmp
        je      .return_pass            ;Length was a multiple of 32; no tail

.lessthan32:
        ;; Tail len
        ;; Do one more overlap pass
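        ;; The tail is handled by one more full 32-byte pass over the last
        ;; 32 bytes of each buffer, overlapping work the loop already did.
        ;; A byte mask built below from constip32 limits the update to the
        ;; not-yet-processed bytes, avoiding a scalar remainder loop.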
        mov     tmp.b, 0x1f
        vpinsrb xtmpl2x, xtmpl2x, tmp.w, 0
        vpbroadcastb xtmpl2, xtmpl2x    ;Construct mask 0x1f1f1f...
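        ;; As a vpshufb control byte, 0x1f selects byte 15 within each
        ;; 128-bit lane (only bits 3-0 index in-lane), so the shuffle
        ;; below broadcasts the len byte, inserted at position 15 of
        ;; both lanes, to all 32 bytes.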

        mov     tmp, len                ;Overlapped offset length-32

        XLDR    x0, [src+tmp]           ;Get next source vector
        XLDR    xd1, [dest1+tmp]        ;Get next dest vector
        XLDR    xd2, [dest2+tmp]        ;Get next dest vector
        XLDR    xd3, [dest3+tmp]        ;Get next dest vector

        sub     len, pos                ;len = -(bytes of the tail window already done)

        vmovdqa xtmph3, [constip32]     ;Load const of i + 32
        vpinsrb xtmpl3x, xtmpl3x, len.w, 15
        vinserti128 xtmpl3, xtmpl3, xtmpl3x, 1 ;swapped to xtmpl3x | xtmpl3x
        vpshufb xtmpl3, xtmpl3, xtmpl2  ;Broadcast len to all bytes. xtmpl2=0x1f1f1f...
        vpcmpgtb xtmpl3, xtmpl3, xtmph3 ;0xff in not-yet-processed lanes, 0x00 elsewhere
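        ;; constip32 is -1,-2,...,-32 in memory order, so (len > -(i+1))
        ;; clears the first -len mask bytes and sets the rest. Example for
        ;; a 100-byte buffer: the loop covers bytes 0-95, the tail window
        ;; is bytes 68-99, len = 68 - 96 = -28, so only window offsets
        ;; 28-31 (bytes 96-99) are updated.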

        vperm2i128 xtmph2, xgft2_lo, xgft2_lo, 0x11 ; swapped to hi | hi
        vperm2i128 xgft2_lo, xgft2_lo, xgft2_lo, 0x00 ; swapped to lo | lo

        vperm2i128 xtmph3, xgft3_lo, xgft3_lo, 0x11 ; swapped to hi | hi
        vperm2i128 xgft3_lo, xgft3_lo, xgft3_lo, 0x00 ; swapped to lo | lo

        vpand   xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0

        ; dest1
        vpshufb xtmph1, xgft1_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl1, xgft1_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph1, xtmph1, xtmpl1  ;GF add high and low partials
        vpand   xtmph1, xtmph1, xtmpl3  ;Keep only the new bytes
        vpxor   xd1, xd1, xtmph1        ;xd1 += partial

        ; dest2
        vpshufb xtmph2, xtmph2, x0      ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xgft2_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph2, xtmph2, xgft2_lo ;GF add high and low partials
        vpand   xtmph2, xtmph2, xtmpl3  ;Keep only the new bytes
        vpxor   xd2, xd2, xtmph2        ;xd2 += partial

        ; dest3
        vpshufb xtmph3, xtmph3, x0      ;Lookup mul table of high nibble
        vpshufb xgft3_lo, xgft3_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph3, xtmph3, xgft3_lo ;GF add high and low partials
        vpand   xtmph3, xtmph3, xtmpl3  ;Keep only the new bytes
        vpxor   xd3, xd3, xtmph3        ;xd3 += partial

        XSTR    [dest1+tmp], xd1
        XSTR    [dest2+tmp], xd2
        XSTR    [dest3+tmp], xd3

.return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret

.return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame

section .data

align 32
constip32:
        ddq 0xf0f1f2f3f4f5f6f7f8f9fafbfcfdfeff
        ddq 0xe0e1e2e3e4e5e6e7e8e9eaebecedeeef
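        ;; ddq is little-endian, so in memory constip32 reads 0xff, 0xfe,
        ;; ..., 0xe0: signed bytes -1 through -32, one per lane position,
        ;; compared against the broadcast (negative) len by vpcmpgtb.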

;;; func        core, ver, snum
slversion gf_3vect_mad_avx2, 04, 01, 0208