;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; gf_4vect_mad_avx(len, vec, vec_i, mul_array, src, dest);
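;;;
;;; For orientation: this routine multiplies one source buffer by four GF(2^8)
;;; constants (selected by vec_i from mul_array) and XOR-adds the products into
;;; four destination/parity buffers. A rough, hedged C sketch of the intended
;;; effect follows; it is not part of the build, and gf_mul_tbl() is a
;;; hypothetical helper standing in for the 32-byte nibble-table lookup the
;;; assembly below performs with vpshufb:
;;;
;;;   /* product of byte b and one coefficient, via its 32-byte table:
;;;      16 low-nibble products followed by 16 high-nibble products */
;;;   static unsigned char gf_mul_tbl(const unsigned char *tbl, unsigned char b)
;;;   {
;;;           return tbl[b & 0x0f] ^ tbl[16 + (b >> 4)];
;;;   }
;;;
;;;   void gf_4vect_mad_sketch(int len, int vec, int vec_i,
;;;                            unsigned char *mul_array, unsigned char *src,
;;;                            unsigned char **dest)
;;;   {
;;;           for (int j = 0; j < 4; j++) {           /* four outputs  */
;;;                   unsigned char *tbl = mul_array + 32 * (j * vec + vec_i);
;;;                   for (int i = 0; i < len; i++)   /* multiply-add  */
;;;                           dest[j][i] ^= gf_mul_tbl(tbl, src[i]);
;;;           }
;;;   }
;;;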
%include "reg_sizes.asm"

%ifidn __OUTPUT_FORMAT__, win64
 %define stack_size 16*10 + 3*8
 %define arg(x) [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x

        movdqa  [rsp+16*0], xmm6
        movdqa  [rsp+16*1], xmm7
        movdqa  [rsp+16*2], xmm8
        movdqa  [rsp+16*3], xmm9
        movdqa  [rsp+16*4], xmm10
        movdqa  [rsp+16*5], xmm11
        movdqa  [rsp+16*6], xmm12
        movdqa  [rsp+16*7], xmm13
        movdqa  [rsp+16*8], xmm14
        movdqa  [rsp+16*9], xmm15
        save_reg        r12, 10*16 + 0*8
        save_reg        r13, 10*16 + 1*8
        save_reg        r15, 10*16 + 2*8
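        ;; Windows x64 ABI note: xmm6-xmm15 and r12/r13/r15 are callee-saved,
        ;; which is why the prologue above spills them and the matching
        ;; restore sequence below reloads them before returning.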
        movdqa  xmm6, [rsp+16*0]
        movdqa  xmm7, [rsp+16*1]
        movdqa  xmm8, [rsp+16*2]
        movdqa  xmm9, [rsp+16*3]
        movdqa  xmm10, [rsp+16*4]
        movdqa  xmm11, [rsp+16*5]
        movdqa  xmm12, [rsp+16*6]
        movdqa  xmm13, [rsp+16*7]
        movdqa  xmm14, [rsp+16*8]
        movdqa  xmm15, [rsp+16*9]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r13, [rsp + 10*16 + 1*8]
        mov     r15, [rsp + 10*16 + 2*8]

%elifidn __OUTPUT_FORMAT__, elf64

%macro FUNC_RESTORE 0

;;; gf_4vect_mad_avx(len, vec, vec_i, mul_array, src, dest)
%define mul_array arg3
%define pos.w return.w
%define dest2 mul_array

%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
;;; Use Non-temporal load/store
 %define XLDR vmovntdqa
 %define XSTR vmovntdq
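;; XLDR/XSTR abstract the vector load/store flavor: unaligned vmovdqu by
;; default, or the non-temporal vmovntdqa/vmovntdq pair when EC_ALIGNED_ADDR
;; guarantees 16-byte aligned buffers, so stores can stream past the cache
;; when the destinations will not be re-read soon.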
%define xmask0f  xmm15
%define xgft3_hi xmm14
%define xgft4_hi xmm13
%define xgft4_lo xmm12

global gf_4vect_mad_avx:function
func(gf_4vect_mad_avx)

        vmovdqa xmask0f, [mask0f]       ;Load mask of lower nibble in each byte

        sal     vec_i, 5                ;Multiply by 32
        lea     tmp3, [mul_array + vec_i]
        sal     tmp, 6                  ;Multiply by 64
        vmovdqu xgft3_hi, [tmp3+tmp+16] ; " Cx{00}, Cx{10}, Cx{20}, ... , Cx{f0}
        sal     vec, 5                  ;Multiply by 32
        vmovdqu xgft4_lo, [tmp3+tmp]    ;Load array Dx{00}, Dx{01}, Dx{02}, ...
        vmovdqu xgft4_hi, [tmp3+tmp+16] ; " Dx{00}, Dx{10}, Dx{20}, ... , Dx{f0}
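        ;; mul_array holds one 32-byte lookup pair per (output, source)
        ;; coefficient, output-major: 16 bytes of low-nibble products followed
        ;; by 16 bytes of high-nibble products (hence the +16 loads). tmp3
        ;; points at the entry for source vec_i of the first output, and the
        ;; entries for outputs 2-4 follow at multiples of vec*32 from there.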
        mov     dest2, [dest1+PS]       ; reuse mul_array
        mov     dest3, [dest1+2*PS]
        mov     dest4, [dest1+3*PS]     ; reuse vec_i

        XLDR    x0, [src+pos]           ;Get next source vector
        vmovdqu xtmph1, [tmp3+16]       ; " Ax{00}, Ax{10}, Ax{20}, ... , Ax{f0}
        vmovdqu xtmpl1, [tmp3]          ;Load array Ax{00}, Ax{01}, Ax{02}, ...
        vmovdqu xtmph2, [tmp3+vec+16]   ; " Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
        vmovdqu xtmpl2, [tmp3+vec]      ;Load array Bx{00}, Bx{01}, Bx{02}, ...
        vmovdqu xtmpl3, [tmp3+2*vec]    ;Load array Cx{00}, Cx{01}, Cx{02}, ...

        XLDR    xd1, [dest1+pos]        ;Get next dest vector
        XLDR    xd2, [dest2+pos]        ;Get next dest vector
        vpand   xtmpa, x0, xmask0f      ;Mask low src nibble in bits 3-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 3-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 3-0
        vpshufb xtmph1, xtmph1, x0      ;Lookup mul table of high nibble
        vpshufb xtmpl1, xtmpl1, xtmpa   ;Lookup mul table of low nibble
        vpxor   xtmph1, xtmph1, xtmpl1  ;GF add high and low partials
        vpxor   xd1, xd1, xtmph1
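        ;; The sequence above is one GF(2^8) multiply-accumulate: each source
        ;; byte is split into its two nibbles, vpshufb fetches the 16-entry
        ;; low- and high-nibble products in parallel, and their XOR (addition
        ;; in GF(2^8)) is the byte times coefficient A; XOR-ing that into xd1
        ;; accumulates it onto the destination. (vpsraw shifts 16-bit lanes,
        ;; so the vpand that follows it is what confines each byte to its own
        ;; high nibble.)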
        XLDR    xd3, [dest3+pos]        ;Reuse xtmph1, Get next dest vector
        XLDR    xd4, [dest4+pos]        ;Reuse xtmpl1, Get next dest vector

        vpshufb xtmph2, xtmph2, x0      ;Lookup mul table of high nibble
        vpshufb xtmpl2, xtmpl2, xtmpa   ;Lookup mul table of low nibble
        vpxor   xtmph2, xtmph2, xtmpl2  ;GF add high and low partials
        vpxor   xd2, xd2, xtmph2

        vpshufb xtmph3, xgft3_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl3, xtmpl3, xtmpa   ;Lookup mul table of low nibble
        vpxor   xtmph3, xtmph3, xtmpl3  ;GF add high and low partials
        vpxor   xd3, xd3, xtmph3

        vpshufb xtmph4, xgft4_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl4, xgft4_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xtmph4, xtmph4, xtmpl4  ;GF add high and low partials
        vpxor   xd4, xd4, xtmph4

        XSTR    [dest1+pos], xd1        ;Store result
        XSTR    [dest2+pos], xd2        ;Store result
        XSTR    [dest3+pos], xd3        ;Store result
        XSTR    [dest4+pos], xd4        ;Store result

        add     pos, 16                 ;Loop on 16 bytes at a time
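        ;; The main loop advances 16 bytes per pass; any final partial block
        ;; is handled by the overlapped tail pass below, which reloads the
        ;; last 16-byte window and masks off the bytes the loop has already
        ;; written.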
        ;; Do one more overlap pass
        mov     tmp, len                ;Overlapped offset length-16

        XLDR    x0, [src+tmp]           ;Get next source vector

        vmovdqu xtmph1, [tmp3+16]       ; " Ax{00}, Ax{10}, Ax{20}, ... , Ax{f0}
        vmovdqu xtmpl1, [tmp3]          ;Load array Ax{00}, Ax{01}, Ax{02}, ...
        vmovdqu xtmph2, [tmp3+vec+16]   ; " Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
        vmovdqu xtmpl2, [tmp3+vec]      ;Load array Bx{00}, Bx{01}, Bx{02}, ...
        vmovdqu xtmpl3, [tmp3+2*vec]    ;Load array Cx{00}, Cx{01}, Cx{02}, ...

        XLDR    xd1, [dest1+tmp]        ;Get next dest vector
        XLDR    xd2, [dest2+tmp]        ;Get next dest vector
        XLDR    xtmph4, [dest3+tmp]     ;Get next dest vector

        vmovdqa xtmpl4, [constip16]     ;Load const of i + 16
        vpinsrb xtmph3, xtmph3, len.w, 15
        vpshufb xtmph3, xtmph3, xmask0f ;Broadcast len to all bytes
        vpcmpgtb xtmph3, xtmph3, xtmpl4
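        ;; The three instructions above turn len into a per-byte predicate:
        ;; xtmph3 ends up 0xff only in the lanes of this final, overlapping
        ;; 16-byte window that the main loop has not updated yet. AND-ing each
        ;; GF partial with this mask before the XOR below leaves the already
        ;; written destination bytes unchanged.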
        XLDR    xtmpl4, [dest4+tmp]     ;Get next dest vector
        vpand   xtmpa, x0, xmask0f      ;Mask low src nibble in bits 3-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 3-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 3-0
        vpshufb xtmph1, xtmph1, x0      ;Lookup mul table of high nibble
        vpshufb xtmpl1, xtmpl1, xtmpa   ;Lookup mul table of low nibble
        vpxor   xtmph1, xtmph1, xtmpl1  ;GF add high and low partials
        vpand   xtmph1, xtmph1, xtmph3
        vpxor   xd1, xd1, xtmph1

        vpshufb xtmph2, xtmph2, x0      ;Lookup mul table of high nibble
        vpshufb xtmpl2, xtmpl2, xtmpa   ;Lookup mul table of low nibble
        vpxor   xtmph2, xtmph2, xtmpl2  ;GF add high and low partials
        vpand   xtmph2, xtmph2, xtmph3
        vpxor   xd2, xd2, xtmph2

        vpshufb xgft3_hi, xgft3_hi, x0  ;Lookup mul table of high nibble
        vpshufb xtmpl3, xtmpl3, xtmpa   ;Lookup mul table of low nibble
        vpxor   xgft3_hi, xgft3_hi, xtmpl3 ;GF add high and low partials
        vpand   xgft3_hi, xgft3_hi, xtmph3
        vpxor   xtmph4, xtmph4, xgft3_hi

        vpshufb xgft4_hi, xgft4_hi, x0  ;Lookup mul table of high nibble
        vpshufb xgft4_lo, xgft4_lo, xtmpa ;Lookup mul table of low nibble
        vpxor   xgft4_hi, xgft4_hi, xgft4_lo ;GF add high and low partials
        vpand   xgft4_hi, xgft4_hi, xtmph3
        vpxor   xtmpl4, xtmpl4, xgft4_hi

        XSTR    [dest1+tmp], xd1        ;Store result
        XSTR    [dest2+tmp], xd2        ;Store result
        XSTR    [dest3+tmp], xtmph4     ;Store result
        XSTR    [dest4+tmp], xtmpl4     ;Store result
mask0f:    ddq 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f
constip16: ddq 0xf0f1f2f3f4f5f6f7f8f9fafbfcfdfeff
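; mask0f keeps the low nibble of every byte for the table lookups above;
; constip16 holds descending byte constants (0xff down to 0xf0, i.e. -1..-16)
; that the tail pass compares against to build its partial-length byte mask.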
;;; func        core, ver, snum
slversion gf_4vect_mad_avx, 02, 01, 020a