;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; gf_4vect_mad_avx2(len, vec, vec_i, mul_array, src, dest);
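;;;
;;; Multiply-accumulate in GF(2^8): for each of the four destination
;;; buffers dest[j], compute dest[j][i] ^= gf_mul(C_j, src[i]) for
;;; 0 <= i < len, where each constant C_j is encoded as a 32-byte nibble
;;; lookup table in mul_array (vec tables per destination, with vec_i
;;; selecting this source's table).
;;;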
%include "reg_sizes.asm"
%ifidn __OUTPUT_FORMAT__, win64
 %define stack_size 16*10 + 3*8
 %define arg(x) [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x
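
;; xmm6-xmm15, r12 and r15 are callee-saved in the Microsoft x64 ABI,
;; so they must be preserved across the call.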
%macro FUNC_SAVE 0
	sub	rsp, stack_size
	movdqa	[rsp+16*0], xmm6
	movdqa	[rsp+16*1], xmm7
	movdqa	[rsp+16*2], xmm8
	movdqa	[rsp+16*3], xmm9
	movdqa	[rsp+16*4], xmm10
	movdqa	[rsp+16*5], xmm11
	movdqa	[rsp+16*6], xmm12
	movdqa	[rsp+16*7], xmm13
	movdqa	[rsp+16*8], xmm14
	movdqa	[rsp+16*9], xmm15
	save_reg	r12,  10*16 + 0*8
	save_reg	r15,  10*16 + 1*8
	end_prolog
%endmacro
%macro FUNC_RESTORE 0
	movdqa	xmm6,  [rsp+16*0]
	movdqa	xmm7,  [rsp+16*1]
	movdqa	xmm8,  [rsp+16*2]
	movdqa	xmm9,  [rsp+16*3]
	movdqa	xmm10, [rsp+16*4]
	movdqa	xmm11, [rsp+16*5]
	movdqa	xmm12, [rsp+16*6]
	movdqa	xmm13, [rsp+16*7]
	movdqa	xmm14, [rsp+16*8]
	movdqa	xmm15, [rsp+16*9]
	mov	r12,  [rsp + 10*16 + 0*8]
	mov	r15,  [rsp + 10*16 + 1*8]
	add	rsp, stack_size
%endmacro
%elifidn __OUTPUT_FORMAT__, elf64
;;; gf_4vect_mad_avx2(len, vec, vec_i, mul_array, src, dest)
%define mul_array arg3
%define pos return
%define pos.w return.w
%define dest2 mul_array
%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Use non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
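
;;; XLDR/XSTR let one code body run with unaligned, aligned, or streaming
;;; accesses; the non-temporal vmovntdq stores bypass the cache, which can
;;; help when the destinations are too large to be re-read from cache soon.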
%define xmask0f  ymm15
%define xmask0fx xmm15
%define xgft1_lo ymm14
%define xgft2_lo ymm13
%define xgft3_lo ymm12
%define xgft4_lo ymm11
global gf_4vect_mad_avx2:function
func(gf_4vect_mad_avx2)
	FUNC_SAVE
	sub	len, 32
	jl	.return_fail
	xor	pos, pos
	mov	tmp.b, 0x0f
	vpinsrb	xmask0fx, xmask0fx, tmp.w, 0
	vpbroadcastb xmask0f, xmask0fx	;Construct mask 0x0f0f0f...
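	;; GF(2^8) multiply is done by table lookup: each source byte is
	;; split into two 4-bit nibbles with this mask, each nibble indexes
	;; a 16-entry vpshufb table, and the two partial products XOR into
	;; the full product.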
	sal	vec_i, 5		;Multiply by 32
	sal	vec, 5			;Multiply by 32
	lea	tmp, [mul_array + vec_i]
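	;; Each {source, dest} pair has a 32-byte table: 16 low-nibble
	;; products followed by 16 high-nibble products. The vec tables for
	;; one destination are contiguous, so this source's table for dest j
	;; sits at mul_array + 32*vec_i + j*32*vec.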
	vmovdqu	xgft1_lo, [tmp]		;Load array Ax{00}, Ax{01}, Ax{02}, ...
					;     "     Ax{00}, Ax{10}, Ax{20}, ... , Ax{f0}
	vmovdqu	xgft2_lo, [tmp+vec]	;Load array Bx{00}, Bx{01}, Bx{02}, ...
					;     "     Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
	vmovdqu	xgft3_lo, [tmp+2*vec]	;Load array Cx{00}, Cx{01}, Cx{02}, ...
					;     "     Cx{00}, Cx{10}, Cx{20}, ... , Cx{f0}
	vmovdqu	xgft4_lo, [tmp+3*vec]	;Load array Dx{00}, Dx{01}, Dx{02}, ...
					;     "     Dx{00}, Dx{10}, Dx{20}, ... , Dx{f0}
	mov	dest2, [dest1+PS]	; reuse mul_array
	mov	dest3, [dest1+2*PS]	; reuse vec
	mov	dest4, [dest1+3*PS]	; reuse vec_i
	mov	dest1, [dest1]
.loop32:
	XLDR	x0, [src+pos]		;Get next source vector

	XLDR	xd1, [dest1+pos]	;Get next dest vector
	XLDR	xd2, [dest2+pos]	;Get next dest vector
	XLDR	xd3, [dest3+pos]	;Get next dest vector
	XLDR	xd4, [dest4+pos]	;reuse xtmpl1. Get next dest vector

	vpand	xtmpl, x0, xmask0f	;Mask low src nibble in bits 4-0
	vpsraw	x0, x0, 4		;Shift to put high nibble into bits 4-0
	vpand	x0, x0, xmask0f		;Mask high src nibble in bits 4-0
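	;; (AVX2 has no per-byte shift, so vpsraw shifts 16-bit words and
	;; the vpand above clears the bits that crossed byte boundaries.)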

	vperm2i128 xtmpa, xtmpl, x0, 0x30 ;swap xtmpa from 1lo|2lo to 1lo|2hi
	vperm2i128 x0, xtmpl, x0, 0x12	;swap x0 from 1hi|2hi to 1hi|2lo
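	;; Now xtmpa = low nibbles (lane 0) | high nibbles (lane 1) and
	;; x0 = high nibbles (lane 0) | low nibbles (lane 1), matching the
	;; lo|hi and hi|lo lane orders of the tables so a single vpshufb
	;; covers both 16-byte halves of each 32-byte lookup.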
	vperm2i128 xtmph1, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
	vperm2i128 xtmph2, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo
	vperm2i128 xtmph3, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo
	vperm2i128 xtmph4, xgft4_lo, xgft4_lo, 0x01 ; swapped to hi | lo

	; dest1
	vpshufb	xtmph1, xtmph1, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl, xgft1_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph1, xtmph1, xtmpl	;GF add high and low partials
	vpxor	xd1, xd1, xtmph1	;xd1 += partial

	; dest2
	vpshufb	xtmph2, xtmph2, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl, xgft2_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph2, xtmph2, xtmpl	;GF add high and low partials
	vpxor	xd2, xd2, xtmph2	;xd2 += partial

	; dest3
	vpshufb	xtmph3, xtmph3, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl, xgft3_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph3, xtmph3, xtmpl	;GF add high and low partials
	vpxor	xd3, xd3, xtmph3	;xd3 += partial

	; dest4
	vpshufb	xtmph4, xtmph4, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl, xgft4_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph4, xtmph4, xtmpl	;GF add high and low partials
	vpxor	xd4, xd4, xtmph4	;xd4 += partial

	XSTR	[dest1+pos], xd1
	XSTR	[dest2+pos], xd2
	XSTR	[dest3+pos], xd3
	XSTR	[dest4+pos], xd4

	add	pos, 32			;Loop on 32 bytes at a time
	cmp	pos, len
	jle	.loop32

	lea	tmp, [len+32]
	cmp	pos, tmp
	je	.return_pass

	;; Tail len
	;; Do one more overlap pass
	mov	tmp.b, 0x1f
	vpinsrb	xtmph1x, xtmph1x, tmp.w, 0
	vpbroadcastb xtmph1, xtmph1x	;Construct mask 0x1f1f1f...
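
	;; The tail is handled with one more pass over the last full 32-byte
	;; window (offset length-32); partial products for bytes the main
	;; loop already updated are masked to zero so the XOR accumulate
	;; leaves them unchanged.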
	mov	tmp, len		;Overlapped offset length-32

	XLDR	x0, [src+tmp]		;Get next source vector

	XLDR	xd1, [dest1+tmp]	;Get next dest vector
	XLDR	xd2, [dest2+tmp]	;Get next dest vector
	XLDR	xd3, [dest3+tmp]	;Get next dest vector
	XLDR	xd4, [dest4+tmp]	;Get next dest vector

	sub	len, pos		;Overlap count, negated (-31..-1)

	vmovdqa	xtmph2, [constip32]	;Load const of i + 32
	vpinsrb	xtmplx, xtmplx, len.w, 15
	vinserti128 xtmpl, xtmpl, xtmplx, 1 ;swapped to xtmplx | xtmplx
	vpshufb	xtmpl, xtmpl, xtmph1	;Broadcast len to all bytes. xtmph1=0x1f1f1f...
	vpcmpgtb xtmpl, xtmpl, xtmph2
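	;; len-pos is the negated overlap count (-31..-1) and constip32
	;; byte i is -(i+1), so this signed compare sets 0xff exactly for
	;; byte positions i >= pos-len, i.e. the not-yet-processed bytes.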

	vpand	xtmph1, x0, xmask0f	;Mask low src nibble in bits 4-0
	vpsraw	x0, x0, 4		;Shift to put high nibble into bits 4-0
	vpand	x0, x0, xmask0f		;Mask high src nibble in bits 4-0

	vperm2i128 xtmpa, xtmph1, x0, 0x30 ;swap xtmpa from 1lo|2lo to 1lo|2hi
	vperm2i128 x0, xtmph1, x0, 0x12	;swap x0 from 1hi|2hi to 1hi|2lo

	vperm2i128 xtmph1, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
	vperm2i128 xtmph2, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo
	vperm2i128 xtmph3, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo
	vperm2i128 xtmph4, xgft4_lo, xgft4_lo, 0x01 ; swapped to hi | lo

	; dest1
	vpshufb	xtmph1, xtmph1, x0	;Lookup mul table of high nibble
	vpshufb	xgft1_lo, xgft1_lo, xtmpa ;Lookup mul table of low nibble
	vpxor	xtmph1, xtmph1, xgft1_lo ;GF add high and low partials
	vpand	xtmph1, xtmph1, xtmpl	;Mask off bytes already processed
	vpxor	xd1, xd1, xtmph1	;xd1 += partial

	; dest2
	vpshufb	xtmph2, xtmph2, x0	;Lookup mul table of high nibble
	vpshufb	xgft2_lo, xgft2_lo, xtmpa ;Lookup mul table of low nibble
	vpxor	xtmph2, xtmph2, xgft2_lo ;GF add high and low partials
	vpand	xtmph2, xtmph2, xtmpl	;Mask off bytes already processed
	vpxor	xd2, xd2, xtmph2	;xd2 += partial

	; dest3
	vpshufb	xtmph3, xtmph3, x0	;Lookup mul table of high nibble
	vpshufb	xgft3_lo, xgft3_lo, xtmpa ;Lookup mul table of low nibble
	vpxor	xtmph3, xtmph3, xgft3_lo ;GF add high and low partials
	vpand	xtmph3, xtmph3, xtmpl	;Mask off bytes already processed
	vpxor	xd3, xd3, xtmph3	;xd3 += partial

	; dest4
	vpshufb	xtmph4, xtmph4, x0	;Lookup mul table of high nibble
	vpshufb	xgft4_lo, xgft4_lo, xtmpa ;Lookup mul table of low nibble
	vpxor	xtmph4, xtmph4, xgft4_lo ;GF add high and low partials
	vpand	xtmph4, xtmph4, xtmpl	;Mask off bytes already processed
	vpxor	xd4, xd4, xtmph4	;xd4 += partial

	XSTR	[dest1+tmp], xd1
	XSTR	[dest2+tmp], xd2
	XSTR	[dest3+tmp], xd3
	XSTR	[dest4+tmp], xd4

.return_pass:
	FUNC_RESTORE
	mov	return, 0
	ret

.return_fail:
	FUNC_RESTORE
	mov	return, 1
	ret

endproc_frame

section .data
align 32

constip32:
	ddq 0xf0f1f2f3f4f5f6f7f8f9fafbfcfdfeff
	ddq 0xe0e1e2e3e4e5e6e7e8e9eaebecedeeef
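;;; Bytes of constip32 read -1, -2, ..., -32 as signed values; they feed
;;; the vpcmpgtb that builds the tail-pass byte mask.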

;;; func        core, ver, snum
slversion gf_4vect_mad_avx2, 04, 01, 020b