;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; gf_3vect_dot_prod_avx512(len, vec, *g_tbls, **buffs, **dests);
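;;;
;;;  len    - number of bytes to process in each source and destination buffer
;;;  vec    - number of source vectors
;;;  g_tbls - GF(2^8) multiply tables, 32 bytes per source/destination pair
;;;           (16 products for the low nibble, 16 for the high nibble)
;;;  buffs  - array of pointers to the source vectors
;;;  dests  - array of pointers to the three destination (parity) vectors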
%include "reg_sizes.asm"

%ifdef HAVE_AS_KNOWS_AVX512
%ifidn __OUTPUT_FORMAT__, elf64
%define tmp3   r13              ; must be saved and restored
%define tmp4   r12              ; must be saved and restored
%define func(x) x: endbranch
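; endbranch (see reg_sizes.asm) marks the entry point as a valid indirect-branch
; target on CET-enabled builds and expands to nothing otherwise.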
%ifidn __OUTPUT_FORMAT__, win64
%define arg4   r12              ; must be saved, loaded and restored
%define arg5   r15              ; must be saved and restored
%define tmp3   r13              ; must be saved and restored
%define tmp4   r14              ; must be saved and restored
%define stack_size 9*16 + 5*8   ; must be an odd multiple of 8
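; (an odd multiple of 8 means that, with the return address already on the
; stack, rsp ends up 16-byte aligned for the vmovdqa xmm spills below)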
%define arg(x) [rsp + stack_size + PS + PS*x]
%define func(x) proc_frame x
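; The Windows x64 ABI treats xmm6-xmm15 and r12-r15 as callee-saved, so the
; prologue below spills the ones this routine clobbers and FUNC_RESTORE
; reloads them before returning.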
        alloc_stack     stack_size
        vmovdqa [rsp + 0*16], xmm6
        vmovdqa [rsp + 1*16], xmm7
        vmovdqa [rsp + 2*16], xmm8
        vmovdqa [rsp + 3*16], xmm9
        vmovdqa [rsp + 4*16], xmm10
        vmovdqa [rsp + 5*16], xmm11
        vmovdqa [rsp + 6*16], xmm12
        vmovdqa [rsp + 7*16], xmm13
        vmovdqa [rsp + 8*16], xmm14
        save_reg        r12, 9*16 + 0*8
        save_reg        r13, 9*16 + 1*8
        save_reg        r14, 9*16 + 2*8
        save_reg        r15, 9*16 + 3*8
%macro FUNC_RESTORE 0
        vmovdqa xmm6, [rsp + 0*16]
        vmovdqa xmm7, [rsp + 1*16]
        vmovdqa xmm8, [rsp + 2*16]
        vmovdqa xmm9, [rsp + 3*16]
        vmovdqa xmm10, [rsp + 4*16]
        vmovdqa xmm11, [rsp + 5*16]
        vmovdqa xmm12, [rsp + 6*16]
        vmovdqa xmm13, [rsp + 7*16]
        vmovdqa xmm14, [rsp + 8*16]
        mov     r12, [rsp + 9*16 + 0*8]
        mov     r13, [rsp + 9*16 + 1*8]
        mov     r14, [rsp + 9*16 + 2*8]
        mov     r15, [rsp + 9*16 + 3*8]
%define mul_array arg2

%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
%define XLDR vmovdqu8
%define XSTR vmovdqu8
%else
;;; Use Non-temporal load/store
%define XLDR vmovntdqa
%define XSTR vmovntdq
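; Unaligned vmovdqu8 is the safe default; when EC_ALIGNED_ADDR guarantees
; 64-byte aligned buffers, the non-temporal vmovntdqa/vmovntdq forms stream
; the data past the caches to avoid polluting them with large buffers.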
%define xmask0f   zmm11
%define xgft1_lo  zmm10
%define xgft1_loy ymm10
%define xgft1_hi  zmm9
%define xgft2_lo  zmm8
%define xgft2_loy ymm8
%define xgft2_hi  zmm7
%define xgft3_lo  zmm6
%define xgft3_loy ymm6
%define xgft3_hi  zmm5
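; xgftN_lo/xgftN_hi hold the 16-entry multiply tables for destination N:
; the low half is indexed by the source's low nibble, the high half by its
; high nibble. The _loy aliases are the ymm views used for the 32-byte table
; loads before the tables are broadcast across all four 128-bit zmm lanes.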
mk_global gf_3vect_dot_prod_avx512, function
func(gf_3vect_dot_prod_avx512)

        vpbroadcastb xmask0f, tmp       ;Construct mask 0x0f0f0f...
        sal     vec, LOG_PS             ;vec *= PS. Make vec_i count by PS
        mov     dest2, [dest1+PS]
        mov     dest3, [dest1+2*PS]
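; Main loop: for each 64-byte position the inner pass walks all vec source
; buffers, multiplies each 64-byte chunk by its GF(2^8) coefficient and xors
; the product into the xp1..xp3 accumulators, which are then stored to the
; three destination buffers.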
        XLDR    x0, [ptr+pos]           ;Get next source vector
        vpandq  xtmpa, x0, xmask0f      ;Mask low src nibble in bits 3-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 3-0
        vpandq  x0, x0, xmask0f         ;Mask high src nibble in bits 3-0

        vmovdqu8 xgft1_loy, [tmp]               ;Load array Ax{00}..{0f}, Ax{00}..{f0}
        vmovdqu8 xgft2_loy, [tmp+vec*(32/PS)]   ;Load array Bx{00}..{0f}, Bx{00}..{f0}
        vmovdqu8 xgft3_loy, [tmp+vec*(64/PS)]   ;Load array Cx{00}..{0f}, Cx{00}..{f0}
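; Each 32-byte table holds A*{0x00..0x0f} in its low 16 bytes and
; A*{0x00,0x10..0xf0} in its high 16 bytes. vshufi64x2 with immediates 0x00
; and 0x55 replicates those halves into every 128-bit lane so the per-lane
; vpshufb lookups below see the same table everywhere; a GF(2^8) product is
; then table_lo[b & 0x0f] xor table_hi[b >> 4].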
        vshufi64x2 xgft1_hi, xgft1_lo, xgft1_lo, 0x55
        vshufi64x2 xgft1_lo, xgft1_lo, xgft1_lo, 0x00
        vshufi64x2 xgft2_hi, xgft2_lo, xgft2_lo, 0x55
        vshufi64x2 xgft2_lo, xgft2_lo, xgft2_lo, 0x00

        vpshufb xgft1_hi, xgft1_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft1_lo, xgft1_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft1_hi, xgft1_hi, xgft1_lo    ;GF add high and low partials
        vpxorq  xp1, xp1, xgft1_hi              ;xp1 += partial

        vpshufb xgft2_hi, xgft2_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xgft2_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft2_hi, xgft2_hi, xgft2_lo    ;GF add high and low partials
        vpxorq  xp2, xp2, xgft2_hi              ;xp2 += partial

        vshufi64x2 xgft3_hi, xgft3_lo, xgft3_lo, 0x55
        vshufi64x2 xgft3_lo, xgft3_lo, xgft3_lo, 0x00

        vpshufb xgft3_hi, xgft3_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft3_lo, xgft3_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft3_hi, xgft3_hi, xgft3_lo    ;GF add high and low partials
        vpxorq  xp3, xp3, xgft3_hi              ;xp3 += partial
        XSTR    [dest1+pos], xp1
        XSTR    [dest2+pos], xp2
        XSTR    [dest3+pos], xp3

        add     pos, 64                 ;Loop on 64 bytes at a time

        mov     pos, len                ;Overlapped offset length-64
        jmp     .loop64                 ;Do one more overlap pass
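; When len is not a multiple of 64 the final pass repositions pos so the last
; full 64-byte block is processed; the bytes it overlaps with the previous
; iteration are simply recomputed with the same result, so no masked or
; partial-width tail path is needed.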
%ifidn __OUTPUT_FORMAT__, win64
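; Presumably emitted so the win64 object file still defines a symbol when the
; assembler lacks AVX-512 support and the real function is compiled out.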
global no_gf_3vect_dot_prod_avx512
no_gf_3vect_dot_prod_avx512:
%endif
%endif                          ; ifdef HAVE_AS_KNOWS_AVX512