;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; gf_2vect_mad_avx(len, vec, vec_i, mul_array, src, dest);

%include "reg_sizes.asm"

%ifidn __OUTPUT_FORMAT__, win64
 %define stack_size 16*9 + 3*8
 %define arg(x)     [rsp + stack_size + PS + PS*x]
 %define func(x)    proc_frame x
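
	;; Windows x64 prologue: xmm6-xmm15 and r12-r15 are callee-saved in
	;; the Microsoft calling convention, so the xmm6-xmm14, r12 and r15
	;; registers used by this routine are spilled to the local stack frame.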
	movdqa	[rsp+16*0], xmm6
	movdqa	[rsp+16*1], xmm7
	movdqa	[rsp+16*2], xmm8
	movdqa	[rsp+16*3], xmm9
	movdqa	[rsp+16*4], xmm10
	movdqa	[rsp+16*5], xmm11
	movdqa	[rsp+16*6], xmm12
	movdqa	[rsp+16*7], xmm13
	movdqa	[rsp+16*8], xmm14
	save_reg	r12, 9*16 + 0*8
	save_reg	r15, 9*16 + 1*8
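
	;; Epilogue: restore the xmm and general-purpose registers saved above.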
	movdqa	xmm6,  [rsp+16*0]
	movdqa	xmm7,  [rsp+16*1]
	movdqa	xmm8,  [rsp+16*2]
	movdqa	xmm9,  [rsp+16*3]
	movdqa	xmm10, [rsp+16*4]
	movdqa	xmm11, [rsp+16*5]
	movdqa	xmm12, [rsp+16*6]
	movdqa	xmm13, [rsp+16*7]
	movdqa	xmm14, [rsp+16*8]
	mov	r12, [rsp + 9*16 + 0*8]
	mov	r15, [rsp + 9*16 + 1*8]

%elifidn __OUTPUT_FORMAT__, elf64

;;; gf_2vect_mad_avx(len, vec, vec_i, mul_array, src, dest)
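;;; Arguments, as used below:
;;;   len       - length of each buffer in bytes
;;;   vec       - number of source vectors (scaled to a byte offset below)
;;;   vec_i     - index of the source vector being accumulated
;;;   mul_array - GF(2^8) multiply tables, one 32-byte table per src/dest pair
;;;   src       - pointer to the source buffer
;;;   dest      - array of pointers to the two destination (parity) buffers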
%define mul_array arg3
%define pos       return
%define pos.w     return.w
%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Use Non-temporal load/store
 %define XLDR vmovntdqa
 %define XSTR vmovntdq
%endif
%define xmask0f  xmm14
%define xgft1_lo xmm13
%define xgft1_hi xmm12
%define xgft2_lo xmm11
%define xgft2_hi xmm10

global gf_2vect_mad_avx:function
func(gf_2vect_mad_avx)
	vmovdqa	xmask0f, [mask0f]	;Load mask of lower nibble in each byte
	sal	vec_i, 5		;Multiply by 32
	sal	vec, 5			;Scale vec by 32 too (table stride in bytes)
	lea	tmp, [mul_array + vec_i]
	vmovdqu	xgft1_lo, [tmp]		;Load array Ax{00}, Ax{01}, Ax{02}, ...
	vmovdqu	xgft1_hi, [tmp+16]	; "     Ax{00}, Ax{10}, Ax{20}, ... , Ax{f0}
	vmovdqu	xgft2_lo, [tmp+vec]	;Load array Bx{00}, Bx{01}, Bx{02}, ...
	vmovdqu	xgft2_hi, [tmp+vec+16]	; "     Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
	mov	dest2, [dest1+PS]	;dest is an array of pointers: load 2nd dest
	mov	dest1, [dest1]		;load 1st dest pointer
	XLDR	xtmpd1, [dest1+len]	;backup the last 16 bytes in dest
	XLDR	xtmpd2, [dest2+len]	;backup the last 16 bytes in dest
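	;; A buffer whose length is not a multiple of 16 is finished with one
	;; extra pass over the last 16 bytes at an overlapped offset.  Because
	;; dest is updated in place (dest ^= partial), the overlapping bytes
	;; must be recomputed from their original values, so the pre-update
	;; tail of each dest is saved here and restored before that final pass.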
	XLDR	xd1, [dest1+pos]	;Get next dest vector
	XLDR	xd2, [dest2+pos]	;Get next dest vector
.loop16_overlap:
	XLDR	x0, [src+pos]		;Get next source vector
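	;; GF(2^8) multiply by a constant c using two 16-entry vpshufb lookups:
	;; splitting each source byte b into high and low nibbles,
	;; c*b = c*(hi*16) ^ c*lo, so one table indexed by the low nibble and
	;; one indexed by the high nibble give the product with two shuffles
	;; and an XOR.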
	vpand	xtmpa, x0, xmask0f	;Mask low src nibble in bits 4-0
	vpsraw	x0, x0, 4		;Shift to put high nibble into bits 4-0
	vpand	x0, x0, xmask0f		;Mask high src nibble in bits 4-0

	vpshufb	xtmph1, xgft1_hi, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl1, xgft1_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph1, xtmph1, xtmpl1	;GF add high and low partials
	vpxor	xd1, xd1, xtmph1	;xd1 += partial

	vpshufb	xtmph2, xgft2_hi, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl2, xgft2_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph2, xtmph2, xtmpl2	;GF add high and low partials
	vpxor	xd2, xd2, xtmph2	;xd2 += partial
	XSTR	[dest1+pos], xd1
	XSTR	[dest2+pos], xd2

	add	pos, 16			;Loop on 16 bytes at a time
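	;; If 1..15 bytes remain, rewind pos to len (i.e. total length - 16)
	;; and run one overlapped pass using the dest values saved earlier, so
	;; the already-updated bytes are not XORed with their partials twice.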
	mov	pos, len		;Overlapped offset length-16
	vmovdqa	xd1, xtmpd1		;Restore xd1
	vmovdqa	xd2, xtmpd2		;Restore xd2
	jmp	.loop16_overlap		;Do one more overlap pass
align 16
mask0f:	dq 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f

;;; func        core, ver, snum
slversion gf_2vect_mad_avx, 02, 01, 0204