;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; gf_4vect_dot_prod_avx(len, vec, *g_tbls, **buffs, **dests);
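;;;
;;; Hypothetical C-side usage sketch (the prototype and table layout below are
;;; assumptions read off the argument list above, not copied from the header;
;;; buffer names are illustrative):
;;;
;;;   unsigned char *buffs[vec];           /* vec source fragments, len bytes each  */
;;;   unsigned char *dests[4];             /* four output fragments, len bytes each */
;;;   unsigned char g_tbls[4 * 32 * vec];  /* one 32-byte GF table per src/dest pair */
;;;   /* len must be at least 16 bytes */
;;;   gf_4vect_dot_prod_avx(len, vec, g_tbls, buffs, dests);
;;;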
%include "reg_sizes.asm"

%ifidn __OUTPUT_FORMAT__, elf64
%define tmp3   r13                      ; must be saved and restored
%define tmp4   r12                      ; must be saved and restored
%define tmp5   r14                      ; must be saved and restored
%define tmp6   r15                      ; must be saved and restored

%ifidn __OUTPUT_FORMAT__, win64
%define arg4   r12                      ; must be saved, loaded and restored
%define arg5   r15                      ; must be saved and restored
%define tmp3   r13                      ; must be saved and restored
%define tmp4   r14                      ; must be saved and restored
%define tmp5   rdi                      ; must be saved and restored
%define tmp6   rsi                      ; must be saved and restored

%define stack_size  9*16 + 7*8          ; must be an odd multiple of 8
%define arg(x)      [rsp + stack_size + PS + PS*x]

%define func(x) proc_frame x
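; Win64 requires the callee to preserve xmm6-xmm15, r12-r15, rdi and rsi; the
; prologue below spills the ones this function uses (xmm6-xmm14 plus the GPRs
; listed above) into the frame allocated by alloc_stack, and FUNC_RESTORE
; reloads them on the way out.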
        alloc_stack     stack_size
        save_xmm128     xmm6, 0*16
        save_xmm128     xmm7, 1*16
        save_xmm128     xmm8, 2*16
        save_xmm128     xmm9, 3*16
        save_xmm128     xmm10, 4*16
        save_xmm128     xmm11, 5*16
        save_xmm128     xmm12, 6*16
        save_xmm128     xmm13, 7*16
        save_xmm128     xmm14, 8*16
        save_reg        r12, 9*16 + 0*8
        save_reg        r13, 9*16 + 1*8
        save_reg        r14, 9*16 + 2*8
        save_reg        r15, 9*16 + 3*8
        save_reg        rdi, 9*16 + 4*8
        save_reg        rsi, 9*16 + 5*8

%macro FUNC_RESTORE 0
        vmovdqa xmm6, [rsp + 0*16]
        vmovdqa xmm7, [rsp + 1*16]
        vmovdqa xmm8, [rsp + 2*16]
        vmovdqa xmm9, [rsp + 3*16]
        vmovdqa xmm10, [rsp + 4*16]
        vmovdqa xmm11, [rsp + 5*16]
        vmovdqa xmm12, [rsp + 6*16]
        vmovdqa xmm13, [rsp + 7*16]
        vmovdqa xmm14, [rsp + 8*16]
        mov     r12, [rsp + 9*16 + 0*8]
        mov     r13, [rsp + 9*16 + 1*8]
        mov     r14, [rsp + 9*16 + 2*8]
        mov     r15, [rsp + 9*16 + 3*8]
        mov     rdi, [rsp + 9*16 + 4*8]
        mov     rsi, [rsp + 9*16 + 5*8]

%ifidn __OUTPUT_FORMAT__, elf32

;;;================== High Address;
;;;<================= esp of caller
;;;<================= ebp = esp
;;;<================= esp of callee
;;;================== Low Address;

%define arg(x)   [ebp + PS*2 + PS*x]
%define var(x)   [ebp - PS - PS*x]
%define arg0    trans           ; trans and trans2 are used for stack-resident variables
%define arg0_m  arg(0)
%define arg2_m  arg(2)
%define arg3_m  arg(3)
%define arg4_m  arg(4)
%define tmp3_m  var(0)
%define tmp4_m  var(1)
%define tmp5_m  var(2)
%define tmp6_m  var(3)
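; On 32-bit builds only eight general-purpose registers are available, so the
; *_m names above are stack slots rather than registers; SLDR/SSTR (stack
; load/store) shuttle values between a slot and whichever register is free.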
%macro SLDR 2                   ; stack load/restore

        sub     esp, PS*4       ; 4 local variables

%macro FUNC_RESTORE 0
        add     esp, PS*4       ; 4 local variables

%endif                          ; output formats

%define mul_array  arg2

%ifidn PS,4                     ; 32-bit code
%define dest1_m    arg4_m
%define dest2_m    tmp3_m
%define dest3_m    tmp4_m
%define dest4_m    tmp5_m
%define vskip3_m   tmp6_m

%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
;;; Use Non-temporal load/store
%define XLDR vmovntdqa
%define XSTR vmovntdq
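; The streaming vmovntdqa/vmovntdq forms require 16-byte aligned addresses,
; which is why they are only selected when EC_ALIGNED_ADDR is defined; the
; unaligned load/store variant chosen otherwise works with any address.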
%ifidn PS,8                     ; 64-bit code

%ifidn PS,8                     ; 64-bit code
%define xmask0f   xmm14
%define xgft1_lo  xmm13
%define xgft1_hi  xmm12
%define xgft2_lo  xmm11
%define xgft2_hi  xmm10
%define xgft3_lo  xmm9
%define xgft3_hi  xmm8
%define xgft4_lo  xmm7
%define xgft4_hi  xmm6

%define xmm_trans xmm7          ; reuse xmask0f and xgft1_lo
%define xmask0f   xmm_trans
%define xgft1_lo  xmm_trans
%define xgft1_hi  xmm6
%define xgft2_lo  xgft1_lo
%define xgft2_hi  xgft1_hi
%define xgft3_lo  xgft1_lo
%define xgft3_hi  xgft1_hi
%define xgft4_lo  xgft1_lo
%define xgft4_hi  xgft1_hi
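; In 32-bit mode only xmm0-xmm7 exist, so the table registers above are aliased
; onto a single pair and the GF tables are reloaded from memory for each
; destination inside the loop (the PS==4 branches below); the 64-bit build
; keeps separate registers for all four table pairs.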
global gf_4vect_dot_prod_avx:function
func(gf_4vect_dot_prod_avx)

        vmovdqa xmask0f, [mask0f]       ;Load mask of lower nibble in each byte
        SSTR    vskip3_m, vskip3
        sal     vec, LOG_PS             ;vec *= PS. Make vec_i count by PS
        mov     dest2, [dest1+PS]
        mov     dest3, [dest1+2*PS]
        mov     dest4, [dest1+3*PS]
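        ; dest1 comes in holding the dests array of output pointers; the three
        ; loads above pull the second, third and fourth destination pointers
        ; from consecutive PS-sized entries of that array.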
%ifidn PS,8                             ; 64-bit code
        vmovdqu xgft1_lo, [tmp]                 ;Load array Ax{00}, Ax{01}, ..., Ax{0f}
        vmovdqu xgft1_hi, [tmp+16]              ; "     Ax{00}, Ax{10}, ..., Ax{f0}
        vmovdqu xgft2_lo, [tmp+vec*(32/PS)]     ;Load array Bx{00}, Bx{01}, ..., Bx{0f}
        vmovdqu xgft2_hi, [tmp+vec*(32/PS)+16]  ; "     Bx{00}, Bx{10}, ..., Bx{f0}
        vmovdqu xgft3_lo, [tmp+vec*(64/PS)]     ;Load array Cx{00}, Cx{01}, ..., Cx{0f}
        vmovdqu xgft3_hi, [tmp+vec*(64/PS)+16]  ; "     Cx{00}, Cx{10}, ..., Cx{f0}
        vmovdqu xgft4_lo, [tmp+vskip3]          ;Load array Dx{00}, Dx{01}, ..., Dx{0f}
        vmovdqu xgft4_hi, [tmp+vskip3+16]       ; "     Dx{00}, Dx{10}, ..., Dx{f0}
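        ; GF(2^8) multiply via two vpshufb table lookups: the low nibble of each
        ; source byte indexes the *_lo table, the high nibble (shifted down by 4)
        ; indexes the *_hi table, and the two partial products are combined with
        ; vpxor.  The 64-bit build preloads all four 32-byte tables for the
        ; current source vector above; the 32-bit build reloads them one pair at
        ; a time inside the loop.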
        XLDR    x0, [ptr+pos]           ;Get next source vector

        vpand   xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0

        XLDR    x0, [ptr+pos]           ;Get next source vector
        vmovdqa xmask0f, [mask0f]       ;Load mask of lower nibble in each byte

        vpand   xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0

        vmovdqu xgft1_lo, [tmp]         ;Load array Ax{00}, Ax{01}, ..., Ax{0f}
        vmovdqu xgft1_hi, [tmp+16]      ; "     Ax{00}, Ax{10}, ..., Ax{f0}

        vpshufb xgft1_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft1_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft1_hi, xgft1_lo      ;GF add high and low partials
        vpxor   xp1, xgft1_hi           ;xp1 += partial

%ifidn PS,4                             ; 32-bit code
        vmovdqu xgft2_lo, [tmp+vec*(32/PS)]     ;Load array Bx{00}, Bx{01}, ..., Bx{0f}
        vmovdqu xgft2_hi, [tmp+vec*(32/PS)+16]  ; "     Bx{00}, Bx{10}, ..., Bx{f0}

        vpshufb xgft2_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft2_hi, xgft2_lo      ;GF add high and low partials
        vpxor   xp2, xgft2_hi           ;xp2 += partial

%ifidn PS,4                             ; 32-bit code
        vmovdqu xgft3_lo, [tmp+vec*(32/PS)]     ;Load array Cx{00}, Cx{01}, ..., Cx{0f}
        vmovdqu xgft3_hi, [tmp+vec*(32/PS)+16]  ; "     Cx{00}, Cx{10}, ..., Cx{f0}

        vpshufb xgft3_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft3_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft3_hi, xgft3_lo      ;GF add high and low partials
        vpxor   xp3, xgft3_hi           ;xp3 += partial

%ifidn PS,4                             ; 32-bit code
        SLDR    vskip3, vskip3_m
        vmovdqu xgft4_lo, [tmp+vskip3]          ;Load array Dx{00}, Dx{01}, ..., Dx{0f}
        vmovdqu xgft4_hi, [tmp+vskip3+16]       ; "     Dx{00}, Dx{10}, ..., Dx{f0}

        vpshufb xgft4_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft4_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft4_hi, xgft4_lo      ;GF add high and low partials
        vpxor   xp4, xgft4_hi           ;xp4 += partial

        XSTR    [dest1+pos], xp1
        XSTR    [dest2+pos], xp2
        XSTR    [dest3+pos], xp3
        XSTR    [dest4+pos], xp4

        add     pos, 16                 ;Loop on 16 bytes at a time

        mov     pos, len                ;Overlapped offset length-16
        jmp     .loop16                 ;Do one more overlap pass
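        ; The tail is handled with an overlap pass: pos is rewound so the final
        ; iteration covers exactly the last 16 bytes of the buffers.  Bytes that
        ; were already processed are simply recomputed and stored again, which
        ; is safe because each output byte depends only on the source bytes at
        ; the same offset.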
mask0f: ddq 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f

;;;       func                   core, ver, snum
slversion gf_4vect_dot_prod_avx, 02,   05,  0193