;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2019 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;
;;; gf_5vect_mad_avx512(len, vec, vec_i, mul_array, src, dest);
;;;
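;;; gf_5vect_mad_avx512 multiplies one source buffer by five GF(2^8)
;;; constants and XORs each product stream into its own destination
;;; buffer. As a rough scalar sketch (gf_mul is a hypothetical helper,
;;; not part of this file; dest is an array of five buffer pointers):
;;;
;;;     for (i = 0; i < len; i++)
;;;         for (j = 0; j < 5; j++)
;;;             dest[j][i] ^= gf_mul(const[j], src[i]);
;;;
;;; The constants arrive pre-expanded in mul_array as 32-byte nibble
;;; lookup tables: vec_i selects this source's column of tables and
;;; vec (scaled by 32 below) is the row stride between the five
;;; destinations' tables.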
%include "reg_sizes.asm"

%ifdef HAVE_AS_KNOWS_AVX512

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9
 %define tmp    r11
 %define tmp2   r10
 %define return rax
 %define func(x) x:
 %define FUNC_SAVE
 %define FUNC_RESTORE
%endif

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9
 %define arg4   r12             ; must be saved, loaded and restored
 %define arg5   r15             ; must be saved and restored
 %define tmp    r11
 %define tmp2   r10
 %define return rax
 %define stack_size 16*10 + 3*8
 %define arg(x) [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x

 %macro FUNC_SAVE 0
        alloc_stack     stack_size
        movdqa  [rsp+16*0], xmm6
        movdqa  [rsp+16*1], xmm7
        movdqa  [rsp+16*2], xmm8
        movdqa  [rsp+16*3], xmm9
        movdqa  [rsp+16*4], xmm10
        movdqa  [rsp+16*5], xmm11
        movdqa  [rsp+16*6], xmm12
        movdqa  [rsp+16*7], xmm13
        movdqa  [rsp+16*8], xmm14
        movdqa  [rsp+16*9], xmm15
        save_reg        r12, 10*16 + 0*8
        save_reg        r15, 10*16 + 1*8
        end_prolog
        mov     arg4, arg(4)            ;5th and 6th args come in on the stack
        mov     arg5, arg(5)
 %endmacro
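;; Note: the Windows x64 calling convention treats xmm6-xmm15, r12 and
;; r15 as callee-saved, which is why FUNC_SAVE spills exactly these; the
;; proc_frame helper macros (alloc_stack/save_reg/end_prolog) also record
;; the unwind metadata Windows needs for exception handling.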
 %macro FUNC_RESTORE 0
        movdqa  xmm6,  [rsp+16*0]
        movdqa  xmm7,  [rsp+16*1]
        movdqa  xmm8,  [rsp+16*2]
        movdqa  xmm9,  [rsp+16*3]
        movdqa  xmm10, [rsp+16*4]
        movdqa  xmm11, [rsp+16*5]
        movdqa  xmm12, [rsp+16*6]
        movdqa  xmm13, [rsp+16*7]
        movdqa  xmm14, [rsp+16*8]
        movdqa  xmm15, [rsp+16*9]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r15, [rsp + 10*16 + 1*8]
        add     rsp, stack_size
 %endmacro
%endif

%define PS    8
%define len   arg0
%define vec   arg1
%define vec_i arg2
%define mul_array arg3
%define src       arg4
%define dest1     arg5
%define pos       return

%define dest2     tmp2
%define dest3     mul_array
%define dest4     vec
%define dest5     vec_i
%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
 %define XLDR vmovdqu8
 %define XSTR vmovdqu8
%else
;;; Use Non-temporal load/store
 %define XLDR vmovntdqa
 %define XSTR vmovntdq
%endif
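;; vmovdqu8 tolerates any alignment; the non-temporal vmovntdqa/vmovntdq
;; pair streams 64-byte blocks past the cache hierarchy, which can help
;; when the buffers are much larger than the last-level cache, but it
;; requires 64-byte-aligned addresses.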
%define x0        zmm0
%define xtmpa     zmm1
%define xtmph1    zmm2
%define xtmpl1    zmm3
%define xtmph2    zmm4
%define xtmpl2    zmm5
%define xgft1_hi  zmm6
%define xgft1_lo  zmm7
%define xgft1_loy ymm7
%define xgft2_hi  zmm8
%define xgft2_lo  zmm9
%define xgft2_loy ymm9
%define xgft3_hi  zmm10
%define xgft3_lo  zmm11
%define xgft3_loy ymm11
%define xgft4_hi  zmm12
%define xgft4_lo  zmm13
%define xgft4_loy ymm13
%define xgft5_hi  zmm14
%define xgft5_lo  zmm15
%define xgft5_loy ymm15
%define xd1       zmm16
%define xd2       zmm17
%define xd3       zmm18
%define xd4       zmm19
%define xd5       zmm20
%define xmask0f   zmm21
%define xtmph3    zmm22   ; remaining work registers taken from zmm22-27,
%define xtmpl3    zmm23   ; which are unused by the map above
%define xtmph4    zmm24
%define xtmpl4    zmm25
%define xtmph5    zmm26
%define xtmpl5    zmm27

section .text

align 16
global gf_5vect_mad_avx512:ISAL_SYM_TYPE_FUNCTION
func(gf_5vect_mad_avx512)
        FUNC_SAVE
        sub     len, 64
        jl      .return_fail
        xor     pos, pos
        mov     tmp, 0x0f
        vpbroadcastb xmask0f, tmp       ;Construct mask 0x0f0f0f...
        sal     vec_i, 5                ;Multiply by 32
        sal     vec, 5                  ;Multiply by 32
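        ;; Each constant's lookup table is 32 bytes (16 low-nibble
        ;; products followed by 16 high-nibble products), so vec and
        ;; vec_i scale from table counts to byte offsets in mul_array.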
        lea     tmp, [mul_array + vec_i]
        vmovdqu xgft1_loy, [tmp]        ;Load array Ax{00}..{0f}, Ax{00}..{f0}
        vmovdqu xgft2_loy, [tmp+vec]    ;Load array Bx{00}..{0f}, Bx{00}..{f0}
        vmovdqu xgft3_loy, [tmp+2*vec]  ;Load array Cx{00}..{0f}, Cx{00}..{f0}
        vmovdqu xgft5_loy, [tmp+4*vec]  ;Load array Ex{00}..{0f}, Ex{00}..{f0}
        add     tmp, vec                ;Step tmp: 3*vec is not an encodable index scale
        vmovdqu xgft4_loy, [tmp+2*vec]  ;Load array Dx{00}..{0f}, Dx{00}..{f0}
        vshufi64x2 xgft1_hi, xgft1_lo, xgft1_lo, 0x55
        vshufi64x2 xgft1_lo, xgft1_lo, xgft1_lo, 0x00
        vshufi64x2 xgft2_hi, xgft2_lo, xgft2_lo, 0x55
        vshufi64x2 xgft2_lo, xgft2_lo, xgft2_lo, 0x00
        vshufi64x2 xgft3_hi, xgft3_lo, xgft3_lo, 0x55
        vshufi64x2 xgft3_lo, xgft3_lo, xgft3_lo, 0x00
        vshufi64x2 xgft4_hi, xgft4_lo, xgft4_lo, 0x55
        vshufi64x2 xgft4_lo, xgft4_lo, xgft4_lo, 0x00
        vshufi64x2 xgft5_hi, xgft5_lo, xgft5_lo, 0x55
        vshufi64x2 xgft5_lo, xgft5_lo, xgft5_lo, 0x00
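        ;; vshufi64x2 replicates one 128-bit lane of the source across
        ;; all four lanes of the zmm: imm 0x00 broadcasts lane 0 (the
        ;; 16-byte low-nibble table) and 0x55 broadcasts lane 1 (the
        ;; high-nibble table), so every vpshufb lane sees the same table.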
        mov     dest2, [dest1+PS]
        mov     dest3, [dest1+2*PS]     ; reuse mul_array
        mov     dest4, [dest1+3*PS]     ; reuse vec
        mov     dest5, [dest1+4*PS]     ; reuse vec_i
        mov     dest1, [dest1]          ;dest1 held the pointer-array base
        mov     tmp, -1
        kmovq   k1, tmp                 ;All 64 byte lanes active for full blocks

.loop64:
        XLDR    x0, [src+pos]           ;Get next source vector
        XLDR    xd1, [dest1+pos]        ;Get next dest vector
        XLDR    xd2, [dest2+pos]        ;Get next dest vector
        XLDR    xd3, [dest3+pos]        ;Get next dest vector
        XLDR    xd4, [dest4+pos]        ;Get next dest vector
        XLDR    xd5, [dest5+pos]        ;Get next dest vector

        vpandq  xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpandq  x0, x0, xmask0f         ;Mask high src nibble in bits 4-0
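        ;; GF(2^8) multiply via 4-bit table lookups: for each byte b,
        ;; const*b = tbl_lo[b & 0x0f] ^ tbl_hi[b >> 4]. vpshufb performs
        ;; 64 independent 16-entry lookups per instruction, so one pass
        ;; multiplies 64 source bytes. vpsraw shifts 16-bit words, so
        ;; bits from the neighboring byte bleed into each byte's high
        ;; nibble; the vpandq above strips them off again.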
        ; dest1
        vpshufb xtmph1 {k1}{z}, xgft1_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl1 {k1}{z}, xgft1_lo, xtmpa ;Lookup mul table of low nibble
        vpxorq  xtmph1, xtmph1, xtmpl1          ;GF add high and low partials
        vpxorq  xd1, xd1, xtmph1                ;xd1 += partial

        ; dest2
        vpshufb xtmph2 {k1}{z}, xgft2_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl2 {k1}{z}, xgft2_lo, xtmpa ;Lookup mul table of low nibble
        vpxorq  xtmph2, xtmph2, xtmpl2          ;GF add high and low partials
        vpxorq  xd2, xd2, xtmph2                ;xd2 += partial

        ; dest3
        vpshufb xtmph3 {k1}{z}, xgft3_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl3 {k1}{z}, xgft3_lo, xtmpa ;Lookup mul table of low nibble
        vpxorq  xtmph3, xtmph3, xtmpl3          ;GF add high and low partials
        vpxorq  xd3, xd3, xtmph3                ;xd3 += partial

        ; dest4
        vpshufb xtmph4 {k1}{z}, xgft4_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl4 {k1}{z}, xgft4_lo, xtmpa ;Lookup mul table of low nibble
        vpxorq  xtmph4, xtmph4, xtmpl4          ;GF add high and low partials
        vpxorq  xd4, xd4, xtmph4                ;xd4 += partial

        ; dest5
        vpshufb xtmph5 {k1}{z}, xgft5_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl5 {k1}{z}, xgft5_lo, xtmpa ;Lookup mul table of low nibble
        vpxorq  xtmph5, xtmph5, xtmpl5          ;GF add high and low partials
        vpxorq  xd5, xd5, xtmph5                ;xd5 += partial
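        ;; The five lookup/XOR chains above are independent of one
        ;; another, so an out-of-order core can overlap their vpshufb
        ;; and vpxorq operations rather than serializing on a single
        ;; dependency chain.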
        XSTR    [dest1+pos], xd1
        XSTR    [dest2+pos], xd2
        XSTR    [dest3+pos], xd3
        XSTR    [dest4+pos], xd4
        XSTR    [dest5+pos], xd5

        add     pos, 64                 ;Loop on 64 bytes at a time
        cmp     pos, len
        jle     .loop64

        lea     tmp, [len + 64]
        cmp     pos, tmp
        je      .return_pass

        ;; Tail len
        mov     pos, (1 << 63)
        lea     tmp, [len + 64 - 1]
        and     tmp, 63
        sarx    pos, pos, tmp
        kmovq   k1, pos                 ;Mask only the not-yet-updated tail lanes
        mov     pos, len                ;Overlapped offset length-64
        jmp     .loop64                 ;Do one more overlap pass
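;; Tail handling: len was biased by -64 on entry, so a final partial
;; block is redone as one more full-width pass at offset len. Because
;; dest is read-modify-write here, bytes already updated by the last
;; aligned pass must not be XORed a second time; the k1 mask built
;; above keeps only the ((orig_len - 1) & 63) + 1 high byte lanes, and
;; the {z} masking on vpshufb zeroes the partial products for the rest,
;; leaving those dest bytes unchanged.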
.return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret

.return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame

%else
%ifidn __OUTPUT_FORMAT__, win64
global no_gf_5vect_mad_avx512
no_gf_5vect_mad_avx512:
%endif
%endif ; ifdef HAVE_AS_KNOWS_AVX512