;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; gf_3vect_mad_avx2(len, vec, vec_i, mul_array, src, dest);
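;;;
;;; For orientation, a scalar C model of what this kernel computes (a
;;; sketch only; gf_mul() is an illustrative helper, not an ISA-L API):
;;;
;;;     /* dest is an array of 3 output buffers; a1/a2/a3 are the GF(2^8)
;;;      * coefficients whose 32-byte lookup tables live in mul_array.  */
;;;     for (int i = 0; i < len; i++) {
;;;         dest[0][i] ^= gf_mul(a1, src[i]);
;;;         dest[1][i] ^= gf_mul(a2, src[i]);
;;;         dest[2][i] ^= gf_mul(a3, src[i]);
;;;     }
;;;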
%include "reg_sizes.asm"
%ifidn __OUTPUT_FORMAT__, win64
 %define arg4  r12		; must be saved, loaded and restored
 %define arg5  r15		; must be saved and restored
 %define stack_size 16*10 + 3*8	; 10 xmm saves, r12/r15, alignment pad
 %define arg(x) [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x
	vmovdqa	[rsp+16*0], xmm6
	vmovdqa	[rsp+16*1], xmm7
	vmovdqa	[rsp+16*2], xmm8
	vmovdqa	[rsp+16*3], xmm9
	vmovdqa	[rsp+16*4], xmm10
	vmovdqa	[rsp+16*5], xmm11
	vmovdqa	[rsp+16*6], xmm12
	vmovdqa	[rsp+16*7], xmm13
	vmovdqa	[rsp+16*8], xmm14
	vmovdqa	[rsp+16*9], xmm15
	save_reg	r12, 10*16 + 0*8
	save_reg	r15, 10*16 + 1*8
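	;; Win64 ABI: xmm6-xmm15, r12 and r15 are callee-saved, hence the
	;; spills above; the restores below undo them before returning.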
	vmovdqa	xmm6,  [rsp+16*0]
	vmovdqa	xmm7,  [rsp+16*1]
	vmovdqa	xmm8,  [rsp+16*2]
	vmovdqa	xmm9,  [rsp+16*3]
	vmovdqa	xmm10, [rsp+16*4]
	vmovdqa	xmm11, [rsp+16*5]
	vmovdqa	xmm12, [rsp+16*6]
	vmovdqa	xmm13, [rsp+16*7]
	vmovdqa	xmm14, [rsp+16*8]
	vmovdqa	xmm15, [rsp+16*9]
	mov	r12, [rsp + 10*16 + 0*8]
	mov	r15, [rsp + 10*16 + 1*8]
%elifidn __OUTPUT_FORMAT__, elf64
;;; gf_3vect_mad_avx2(len, vec, vec_i, mul_array, src, dest)
%define PS 8			; pointer size
%define mul_array arg3
%define pos   return
%define pos.w return.w
%define dest2 mul_array
%define dest3 vec_i
%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Use non-temporal load/store
 %define XLDR vmovntdqa
 %define XSTR vmovntdq
%endif
%define xmask0f   ymm15
%define xmask0fx  xmm15
%define xgft1_lo  ymm14
%define xgft1_hi  ymm13
%define xgft2_lo  ymm12
%define xgft3_lo  ymm11
global gf_3vect_mad_avx2:function
func(gf_3vect_mad_avx2)
	mov	tmp.b, 0x0f
	vpinsrb	xmask0fx, xmask0fx, tmp.w, 0
	vpbroadcastb xmask0f, xmask0fx	;Construct mask 0x0f0f0f...
	sal	vec_i, 5		;Multiply by 32
	sal	vec, 5			;Multiply by 32
	lea	tmp, [mul_array + vec_i]
	vmovdqu	xgft1_lo, [tmp]		;Load array Ax{00}, Ax{01}, ..., Ax{0f}
					;     "     Ax{00}, Ax{10}, ..., Ax{f0}
	vperm2i128 xgft1_hi, xgft1_lo, xgft1_lo, 0x11 ; swapped to hi | hi
	vperm2i128 xgft1_lo, xgft1_lo, xgft1_lo, 0x00 ; swapped to lo | lo
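	;; Each 32-byte gftbl entry stores two 16-byte vpshufb tables: the
	;; coefficient's products with all low nibbles, then with all high
	;; nibbles. The vperm2i128 pair above replicates each half across
	;; both 128-bit lanes, since vpshufb only looks up within a lane.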
	vmovdqu	xgft2_lo, [tmp+vec]	;Load array Bx{00}, Bx{01}, Bx{02}, ...
					;     "     Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
	vmovdqu	xgft3_lo, [tmp+2*vec]	;Load array Cx{00}, Cx{01}, Cx{02}, ...
					;     "     Cx{00}, Cx{10}, Cx{20}, ... , Cx{f0}
	mov	dest2, [dest1+PS]	; load second dest pointer; reuse mul_array
	mov	dest3, [dest1+2*PS]	; load third dest pointer; reuse vec_i
	mov	dest1, [dest1]		; dest1 arrived as pointer to dest array
.loop32:
	XLDR	x0, [src+pos]		;Get next source vector
	XLDR	xd1, [dest1+pos]	;Get next dest vector
	XLDR	xd2, [dest2+pos]	;Get next dest vector
	XLDR	xd3, [dest3+pos]	;Get next dest vector
	vperm2i128 xtmph2, xgft2_lo, xgft2_lo, 0x11 ; swapped to hi | hi
	vperm2i128 xtmpl2, xgft2_lo, xgft2_lo, 0x00 ; swapped to lo | lo
	vperm2i128 xtmph3, xgft3_lo, xgft3_lo, 0x11 ; swapped to hi | hi
	vperm2i128 xtmpl3, xgft3_lo, xgft3_lo, 0x00 ; swapped to lo | lo
	vpand	xtmpa, x0, xmask0f	;Mask low src nibble in bits 4-0
	vpsraw	x0, x0, 4		;Shift to put high nibble into bits 4-0
	vpand	x0, x0, xmask0f		;Mask high src nibble in bits 4-0
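	;; GF(2^8) multiply by table lookup: each byte b = (hi<<4)|lo, and
	;; a*b = a*(hi<<4) ^ a*lo, so two 16-entry vpshufb lookups plus a
	;; vpxor yield 32 products per ymm register.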
	vpshufb	xtmph1, xgft1_hi, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl1, xgft1_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph1, xtmph1, xtmpl1	;GF add high and low partials
	vpxor	xd1, xd1, xtmph1	;xd1 += partial
	vpshufb	xtmph2, xtmph2, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl2, xtmpl2, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph2, xtmph2, xtmpl2	;GF add high and low partials
	vpxor	xd2, xd2, xtmph2	;xd2 += partial

	vpshufb	xtmph3, xtmph3, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl3, xtmpl3, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph3, xtmph3, xtmpl3	;GF add high and low partials
	vpxor	xd3, xd3, xtmph3	;xd3 += partial
	XSTR	[dest1+pos], xd1
	XSTR	[dest2+pos], xd2
	XSTR	[dest3+pos], xd3
	add	pos, 32			;Loop on 32 bytes at a time
	cmp	pos, len
	jle	.loop32
	;; Do one more overlap pass
	mov	tmp.b, 0x1f
	vpinsrb	xtmpl2x, xtmpl2x, tmp.w, 0
	vpbroadcastb xtmpl2, xtmpl2x	;Construct mask 0x1f1f1f...
	mov	tmp, len		;Overlapped offset length-32
	XLDR	x0, [src+tmp]		;Get next source vector
	XLDR	xd1, [dest1+tmp]	;Get next dest vector
	XLDR	xd2, [dest2+tmp]	;Get next dest vector
	XLDR	xd3, [dest3+tmp]	;Get next dest vector
	sub	len, pos		;Overlap with loop as negative count

	vmovdqa	xtmph3, [constip32]	;Load const of i + 32
	vpinsrb	xtmpl3x, xtmpl3x, len.w, 15
	vinserti128 xtmpl3, xtmpl3, xtmpl3x, 1 ;swapped to xtmpl3x | xtmpl3x
	vpshufb	xtmpl3, xtmpl3, xtmpl2	;Broadcast len to all bytes. xtmpl2=0x1f1f1f...
	vpcmpgtb xtmpl3, xtmpl3, xtmph3
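	;; Tail masking: constip32 holds the byte constants -1..-32; after
	;; the signed vpcmpgtb against the broadcast offset, xtmpl3 is 0xff
	;; only in the byte lanes this overlap pass still needs to touch.
	;; The vpand uses below keep bytes already handled by .loop32 from
	;; being xor-accumulated a second time.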
	vperm2i128 xtmph2, xgft2_lo, xgft2_lo, 0x11 ; swapped to hi | hi
	vperm2i128 xgft2_lo, xgft2_lo, xgft2_lo, 0x00 ; swapped to lo | lo
	vperm2i128 xtmph3, xgft3_lo, xgft3_lo, 0x11 ; swapped to hi | hi
	vperm2i128 xgft3_lo, xgft3_lo, xgft3_lo, 0x00 ; swapped to lo | lo
	vpand	xtmpa, x0, xmask0f	;Mask low src nibble in bits 4-0
	vpsraw	x0, x0, 4		;Shift to put high nibble into bits 4-0
	vpand	x0, x0, xmask0f		;Mask high src nibble in bits 4-0
	vpshufb	xtmph1, xgft1_hi, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl1, xgft1_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph1, xtmph1, xtmpl1	;GF add high and low partials
	vpand	xtmph1, xtmph1, xtmpl3	;Keep only unprocessed tail bytes
	vpxor	xd1, xd1, xtmph1	;xd1 += partial
	vpshufb	xtmph2, xtmph2, x0	;Lookup mul table of high nibble
	vpshufb	xgft2_lo, xgft2_lo, xtmpa ;Lookup mul table of low nibble
	vpxor	xtmph2, xtmph2, xgft2_lo ;GF add high and low partials
	vpand	xtmph2, xtmph2, xtmpl3	;Keep only unprocessed tail bytes
	vpxor	xd2, xd2, xtmph2	;xd2 += partial
	vpshufb	xtmph3, xtmph3, x0	;Lookup mul table of high nibble
	vpshufb	xgft3_lo, xgft3_lo, xtmpa ;Lookup mul table of low nibble
	vpxor	xtmph3, xtmph3, xgft3_lo ;GF add high and low partials
	vpand	xtmph3, xtmph3, xtmpl3	;Keep only unprocessed tail bytes
	vpxor	xd3, xd3, xtmph3	;xd3 += partial
	XSTR	[dest1+tmp], xd1
	XSTR	[dest2+tmp], xd2
	XSTR	[dest3+tmp], xd3
section .data
align 32
constip32:
	ddq 0xf0f1f2f3f4f5f6f7f8f9fafbfcfdfeff	;bytes -1..-16 (signed)
	ddq 0xe0e1e2e3e4e5e6e7e8e9eaebecedeeef	;bytes -17..-32 (signed)
;;;       func            core, ver, snum
slversion gf_3vect_mad_avx2, 04,  01, 0208