;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
; * Redistributions of source code must retain the above copyright
; notice, this list of conditions and the following disclaimer.
; * Redistributions in binary form must reproduce the above copyright
; notice, this list of conditions and the following disclaimer in
; the documentation and/or other materials provided with the
; distribution.
; * Neither the name of Intel Corporation nor the names of its
; contributors may be used to endorse or promote products derived
; from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; gf_5vect_mad_avx2(len, vec, vec_i, mul_array, src, dest);
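;;;
;;; Summary (descriptive only): for each of the five outputs p = 0..4 and each
;;; byte i < len, the routine computes
;;;
;;;     dest[p][i] ^= gf_mul(C[p], src[i])        ; GF(2^8) multiply-add
;;;
;;; where C[p] is the constant whose 32-byte lookup table is stored at
;;; mul_array + 32*(p*vec + vec_i).  Here gf_mul is only a placeholder name
;;; for GF(2^8) multiplication, not a symbol defined in this file.
;;;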
%include "reg_sizes.asm"

%ifidn __OUTPUT_FORMAT__, win64
%define stack_size 16*10 + 3*8
%define arg(x) [rsp + stack_size + PS + PS*x]
%define func(x) proc_frame x
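;;; Windows x64 ABI note: xmm6-xmm15 plus r12 and r15 are callee-saved, which
;;; is why the prologue below spills them into the stack frame and the
;;; epilogue restores them before returning.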
movdqa [rsp+16*0],xmm6
movdqa [rsp+16*1],xmm7
movdqa [rsp+16*2],xmm8
movdqa [rsp+16*3],xmm9
movdqa [rsp+16*4],xmm10
movdqa [rsp+16*5],xmm11
movdqa [rsp+16*6],xmm12
movdqa [rsp+16*7],xmm13
movdqa [rsp+16*8],xmm14
movdqa [rsp+16*9],xmm15
save_reg r12, 10*16 + 0*8
save_reg r15, 10*16 + 1*8

movdqa xmm6, [rsp+16*0]
movdqa xmm7, [rsp+16*1]
movdqa xmm8, [rsp+16*2]
movdqa xmm9, [rsp+16*3]
movdqa xmm10, [rsp+16*4]
movdqa xmm11, [rsp+16*5]
movdqa xmm12, [rsp+16*6]
movdqa xmm13, [rsp+16*7]
movdqa xmm14, [rsp+16*8]
movdqa xmm15, [rsp+16*9]
mov r12, [rsp + 10*16 + 0*8]
mov r15, [rsp + 10*16 + 1*8]

%elifidn __OUTPUT_FORMAT__, elf64
%define func(x) x: endbranch

;;; gf_5vect_mad_avx2(len, vec, vec_i, mul_array, src, dest)
%define mul_array arg3
%define pos.w return.w
%define dest3 mul_array

%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
;;; Use non-temporal load/store
%define XLDR vmovntdqa
%define XSTR vmovntdq

%define xmask0f ymm15
%define xmask0fx xmm15
%define xgft1_lo ymm14
%define xgft2_lo ymm13
%define xgft3_lo ymm12
%define xgft4_lo ymm11
%define xgft5_lo ymm10

mk_global gf_5vect_mad_avx2, function
func(gf_5vect_mad_avx2)
vpinsrb xmask0fx, xmask0fx, tmp.w, 0
vpbroadcastb xmask0f, xmask0fx ;Construct mask 0x0f0f0f...

sal vec_i, 5 ;Multiply by 32
sal vec, 5 ;Multiply by 32
lea tmp, [mul_array + vec_i]

vmovdqu xgft1_lo, [tmp] ;Load array Ax{00}, Ax{01}, ..., Ax{0f}
; " Ax{00}, Ax{10}, ..., Ax{f0}
vmovdqu xgft2_lo, [tmp+vec] ;Load array Bx{00}, Bx{01}, ..., Bx{0f}
; " Bx{00}, Bx{10}, ..., Bx{f0}
vmovdqu xgft3_lo, [tmp+2*vec] ;Load array Cx{00}, Cx{01}, ..., Cx{0f}
; " Cx{00}, Cx{10}, ..., Cx{f0}
vmovdqu xgft5_lo, [tmp+4*vec] ;Load array Ex{00}, Ex{01}, ..., Ex{0f}
; " Ex{00}, Ex{10}, ..., Ex{f0}
add tmp, vec ;3*vec is not an addressing-mode scale, so step tmp by vec first
vmovdqu xgft4_lo, [tmp+2*vec] ;Load array Dx{00}, Dx{01}, ..., Dx{0f}
; " Dx{00}, Dx{10}, ..., Dx{f0}
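;;; Table layout: each coefficient owns 32 bytes of mul_array, as the comments
;;; above show.  The low 16 bytes are the GF(2^8) products of the coefficient
;;; with every low-nibble value 0x00-0x0f; the high 16 bytes are the products
;;; with every high-nibble value 0x00, 0x10, ..., 0xf0.  That 32-byte stride
;;; is why vec and vec_i are shifted left by 5 above, and why source bytes are
;;; split into nibbles with the 0x0f mask before the vpshufb lookups.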
mov dest3, [dest1+2*PS] ; reuse mul_array
mov dest4, [dest1+3*PS] ; reuse vec
mov dest5, [dest1+4*PS] ; reuse vec_i
mov dest2, [dest1+PS]

XLDR x0, [src+pos] ;Get next source vector

XLDR xd1, [dest1+pos] ;Get next dest vector
XLDR xd2, [dest2+pos] ;Get next dest vector
XLDR xd3, [dest3+pos] ;Get next dest vector
XLDR xd4, [dest4+pos] ;Get next dest vector
XLDR xd5, [dest5+pos] ;Get next dest vector

vpand xtmpl, x0, xmask0f ;Mask low src nibble in bits 4-0
vpsraw x0, x0, 4 ;Shift to put high nibble into bits 4-0
vpand x0, x0, xmask0f ;Mask high src nibble in bits 4-0
vperm2i128 xtmpa, xtmpl, x0, 0x30 ;swap xtmpa from 1lo|2lo to 1lo|2hi
vperm2i128 x0, xtmpl, x0, 0x12 ;swap x0 from 1hi|2hi to 1hi|2lo
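;;; vpshufb can only index within each 128-bit lane, so the two vperm2i128
;;; instructions above pair each lane of nibbles with the matching table half:
;;; xtmpa = (bytes 0-15 low nibbles | bytes 16-31 high nibbles) and
;;; x0    = (bytes 0-15 high nibbles | bytes 16-31 low nibbles).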
vperm2i128 xtmph1, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
vperm2i128 xtmph2, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo

vpshufb xtmph1, xtmph1, x0 ;Lookup mul table of high nibble
vpshufb xtmpl, xgft1_lo, xtmpa ;Lookup mul table of low nibble
vpxor xtmph1, xtmph1, xtmpl ;GF add high and low partials
vpxor xd1, xd1, xtmph1 ;xd1 += partial
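;;; The four instructions above are one complete GF(2^8) multiply-add:
;;; partial = hi_table[src >> 4] ^ lo_table[src & 0x0f], XORed into xd1.
;;; The same pattern repeats for dest2..dest5 below, with each table's
;;; vperm2i128 lane swap hoisted ahead of the block that consumes it.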
vperm2i128 xtmph1, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo

vpshufb xtmph2, xtmph2, x0 ;Lookup mul table of high nibble
vpshufb xtmpl, xgft2_lo, xtmpa ;Lookup mul table of low nibble
vpxor xtmph2, xtmph2, xtmpl ;GF add high and low partials
vpxor xd2, xd2, xtmph2 ;xd2 += partial

vperm2i128 xtmph2, xgft4_lo, xgft4_lo, 0x01 ; swapped to hi | lo

vpshufb xtmph1, xtmph1, x0 ;Lookup mul table of high nibble
vpshufb xtmpl, xgft3_lo, xtmpa ;Lookup mul table of low nibble
vpxor xtmph1, xtmph1, xtmpl ;GF add high and low partials
vpxor xd3, xd3, xtmph1 ;xd3 += partial

vperm2i128 xtmph1, xgft5_lo, xgft5_lo, 0x01 ; swapped to hi | lo

vpshufb xtmph2, xtmph2, x0 ;Lookup mul table of high nibble
vpshufb xtmpl, xgft4_lo, xtmpa ;Lookup mul table of low nibble
vpxor xtmph2, xtmph2, xtmpl ;GF add high and low partials
vpxor xd4, xd4, xtmph2 ;xd4 += partial

vpshufb xtmph1, xtmph1, x0 ;Lookup mul table of high nibble
vpshufb xtmpl, xgft5_lo, xtmpa ;Lookup mul table of low nibble
vpxor xtmph1, xtmph1, xtmpl ;GF add high and low partials
vpxor xd5, xd5, xtmph1 ;xd5 += partial

XSTR [dest1+pos], xd1
XSTR [dest2+pos], xd2
XSTR [dest3+pos], xd3
XSTR [dest4+pos], xd4
XSTR [dest5+pos], xd5

add pos, 32 ;Loop on 32 bytes at a time

;; Do one more overlap pass
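;;; When len is not a multiple of 32, the tail is handled by re-running one
;;; 32-byte pass over the last 32 bytes of the buffers (offset length-32).
;;; The byte mask built below keeps this pass from touching destination bytes
;;; that the main loop already updated.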
vpinsrb xtmph1x, xtmph1x, tmp.w, 0
vpbroadcastb xtmph1, xtmph1x ;Construct mask 0x1f1f1f...

mov tmp, len ;Overlapped offset length-32

XLDR x0, [src+tmp] ;Get next source vector

XLDR xd1, [dest1+tmp] ;Get next dest vector
XLDR xd2, [dest2+tmp] ;Get next dest vector
XLDR xd3, [dest3+tmp] ;Get next dest vector
XLDR xd4, [dest4+tmp] ;Get next dest vector
XLDR xd5, [dest5+tmp] ;Get next dest vector

vmovdqa xtmph2, [constip32] ;Load const of i + 32
vpinsrb xtmplx, xtmplx, len.w, 15
vinserti128 xtmpl, xtmpl, xtmplx, 1 ;swapped to xtmplx | xtmplx
vpshufb xtmpl, xtmpl, xtmph1 ;Broadcast len to all bytes. xtmph1=0x1f1f1f...
vpcmpgtb xtmpl, xtmpl, xtmph2
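;;; xtmpl is now 0xff for each byte of this final 32-byte block that still
;;; needs processing and 0x00 for bytes the main loop already wrote; it is
;;; ANDed with each partial product below so only unprocessed bytes change.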
vpand xtmph1, x0, xmask0f ;Mask low src nibble in bits 4-0
vpsraw x0, x0, 4 ;Shift to put high nibble into bits 4-0
vpand x0, x0, xmask0f ;Mask high src nibble in bits 4-0
vperm2i128 xtmpa, xtmph1, x0, 0x30 ;swap xtmpa from 1lo|2lo to 1lo|2hi
vperm2i128 x0, xtmph1, x0, 0x12 ;swap x0 from 1hi|2hi to 1hi|2lo

vperm2i128 xtmph1, xgft1_lo, xgft1_lo, 0x01 ; swapped to hi | lo
vperm2i128 xtmph2, xgft2_lo, xgft2_lo, 0x01 ; swapped to hi | lo

vpshufb xtmph1, xtmph1, x0 ;Lookup mul table of high nibble
vpshufb xgft1_lo, xgft1_lo, xtmpa ;Lookup mul table of low nibble
vpxor xtmph1, xtmph1, xgft1_lo ;GF add high and low partials
vpand xtmph1, xtmph1, xtmpl ;Apply tail mask
vpxor xd1, xd1, xtmph1 ;xd1 += partial

vperm2i128 xtmph1, xgft3_lo, xgft3_lo, 0x01 ; swapped to hi | lo

vpshufb xtmph2, xtmph2, x0 ;Lookup mul table of high nibble
vpshufb xgft2_lo, xgft2_lo, xtmpa ;Lookup mul table of low nibble
vpxor xtmph2, xtmph2, xgft2_lo ;GF add high and low partials
vpand xtmph2, xtmph2, xtmpl ;Apply tail mask
vpxor xd2, xd2, xtmph2 ;xd2 += partial

vperm2i128 xtmph2, xgft4_lo, xgft4_lo, 0x01 ; swapped to hi | lo

vpshufb xtmph1, xtmph1, x0 ;Lookup mul table of high nibble
vpshufb xgft3_lo, xgft3_lo, xtmpa ;Lookup mul table of low nibble
vpxor xtmph1, xtmph1, xgft3_lo ;GF add high and low partials
vpand xtmph1, xtmph1, xtmpl ;Apply tail mask
vpxor xd3, xd3, xtmph1 ;xd3 += partial

vperm2i128 xtmph1, xgft5_lo, xgft5_lo, 0x01 ; swapped to hi | lo

vpshufb xtmph2, xtmph2, x0 ;Lookup mul table of high nibble
vpshufb xgft4_lo, xgft4_lo, xtmpa ;Lookup mul table of low nibble
vpxor xtmph2, xtmph2, xgft4_lo ;GF add high and low partials
vpand xtmph2, xtmph2, xtmpl ;Apply tail mask
vpxor xd4, xd4, xtmph2 ;xd4 += partial

vpshufb xtmph1, xtmph1, x0 ;Lookup mul table of high nibble
vpshufb xgft5_lo, xgft5_lo, xtmpa ;Lookup mul table of low nibble
vpxor xtmph1, xtmph1, xgft5_lo ;GF add high and low partials
vpand xtmph1, xtmph1, xtmpl ;Apply tail mask
vpxor xd5, xd5, xtmph1 ;xd5 += partial

XSTR [dest1+tmp], xd1
XSTR [dest2+tmp], xd2
XSTR [dest3+tmp], xd3
XSTR [dest4+tmp], xd4
XSTR [dest5+tmp], xd5

dq 0xf8f9fafbfcfdfeff, 0xf0f1f2f3f4f5f6f7
dq 0xe8e9eaebecedeeef, 0xe0e1e2e3e4e5e6e7
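;;; The two quadwords above are the constip32 table referenced earlier: the
;;; bytes 0xff, 0xfe, ..., 0xe0 (-1 down to -32 as signed bytes), which
;;; vpcmpgtb compares against the broadcast length byte to build the tail mask.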
;;; func core, ver, snum
slversion gf_5vect_mad_avx2, 04, 01, 020e