;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; gf_6vect_mad_sse(len, vec, vec_i, mul_array, src, dest);
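;;;
;;; Multiply-and-add: for each of six output (parity) buffers, computes
;;; dest_k[i] ^= src[i] * gf_const_k over GF(2^8), 16 bytes per step,
;;; using SSSE3 pshufb nibble-table lookups. mul_array points at the
;;; 32-byte low/high-nibble product tables and dest is an array of six
;;; destination pointers.
;;;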
%include "reg_sizes.asm"
%ifidn __OUTPUT_FORMAT__, win64
 %define stack_size 16*10 + 5*8
 %define arg(x) [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x
        movdqa  [rsp+16*0], xmm6
        movdqa  [rsp+16*1], xmm7
        movdqa  [rsp+16*2], xmm8
        movdqa  [rsp+16*3], xmm9
        movdqa  [rsp+16*4], xmm10
        movdqa  [rsp+16*5], xmm11
        movdqa  [rsp+16*6], xmm12
        movdqa  [rsp+16*7], xmm13
        movdqa  [rsp+16*8], xmm14
        movdqa  [rsp+16*9], xmm15
        save_reg        r12, 10*16 + 0*8
        save_reg        r13, 10*16 + 1*8
        save_reg        r14, 10*16 + 2*8
        save_reg        r15, 10*16 + 3*8
        save_reg        rdi, 10*16 + 4*8
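        ;; xmm6-xmm15 and r12-r15/rdi are callee-saved under the Microsoft
        ;; x64 calling convention, hence the spills above; FUNC_RESTORE
        ;; below reloads them before the function returns.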
        movdqa  xmm6, [rsp+16*0]
        movdqa  xmm7, [rsp+16*1]
        movdqa  xmm8, [rsp+16*2]
        movdqa  xmm9, [rsp+16*3]
        movdqa  xmm10, [rsp+16*4]
        movdqa  xmm11, [rsp+16*5]
        movdqa  xmm12, [rsp+16*6]
        movdqa  xmm13, [rsp+16*7]
        movdqa  xmm14, [rsp+16*8]
        movdqa  xmm15, [rsp+16*9]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r13, [rsp + 10*16 + 1*8]
        mov     r14, [rsp + 10*16 + 2*8]
        mov     r15, [rsp + 10*16 + 3*8]
        mov     rdi, [rsp + 10*16 + 4*8]
%elifidn __OUTPUT_FORMAT__, elf64

%macro FUNC_RESTORE 0
;;; gf_6vect_mad_sse(len, vec, vec_i, mul_array, src, dest)
%define mul_array arg3
%define pos.w return.w
%define dest2 mul_array
%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
;;; Use non-temporal load/store
 %define XLDR movntdqa
%define xmask0f  xmm15
%define xgft4_lo xmm14
%define xgft4_hi xmm13
%define xgft5_lo xmm12
%define xgft5_hi xmm11
%define xgft6_lo xmm10
%define xgft6_hi xmm9
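;; Register plan: xmm9-xmm15 hold loop-invariant constants (the 0x0f
;; nibble mask and the lookup tables for outputs 4-6); tables for
;; outputs 1-3 are reloaded from memory each iteration instead.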
global gf_6vect_mad_sse:function
func(gf_6vect_mad_sse)

        movdqa  xmask0f, [mask0f]       ;Load mask of lower nibble in each byte

        sal     vec_i, 5                ;Multiply by 32
        lea     tmp3, [mul_array + vec_i]
        sal     tmp, 6                  ;Multiply by 64
        sal     vec, 5                  ;Multiply by 32
        lea     vec_i, [tmp + vec]      ;vec_i = vec*96
        lea     mul_array, [tmp + vec_i]        ;mul_array = vec*160
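        ;; Each output's table pair sits 32*vec bytes after the previous
        ;; one, so with vec scaled by 32, tmp (vec*64), vec_i (vec*96)
        ;; and mul_array (vec*160) now act as byte offsets from tmp3 to
        ;; the tables for the later outputs.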
        movdqu  xgft5_lo, [tmp3+2*tmp]          ;Load array Ex{00}, Ex{01}, ..., Ex{0f}
        movdqu  xgft5_hi, [tmp3+2*tmp+16]       ;     "     Ex{00}, Ex{10}, ..., Ex{f0}
        movdqu  xgft4_lo, [tmp3+vec_i]          ;Load array Dx{00}, Dx{01}, Dx{02}, ...
        movdqu  xgft4_hi, [tmp3+vec_i+16]       ;     "     Dx{00}, Dx{10}, Dx{20}, ... , Dx{f0}
        movdqu  xgft6_lo, [tmp3+mul_array]      ;Load array Fx{00}, Fx{01}, ..., Fx{0f}
        movdqu  xgft6_hi, [tmp3+mul_array+16]   ;     "     Fx{00}, Fx{10}, ..., Fx{f0}

        mov     dest2, [dest1+PS]
        mov     dest3, [dest1+2*PS]
        mov     dest4, [dest1+3*PS]             ; reuse mul_array
        mov     dest5, [dest1+4*PS]
        mov     dest6, [dest1+5*PS]             ; reuse vec_i
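        ;; dest1 is the base of an array of six destination pointers;
        ;; once the table offsets are computed, mul_array and vec_i are
        ;; dead and can be reused to hold dest pointers.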
        XLDR    x0, [src+pos]           ;Get next source vector

        movdqu  xtmpl1, [tmp3]          ;Load array Ax{00}, Ax{01}, Ax{02}, ...
        movdqu  xtmph1, [tmp3+16]       ;     "     Ax{00}, Ax{10}, Ax{20}, ... , Ax{f0}
        movdqu  xtmpl2, [tmp3+vec]      ;Load array Bx{00}, Bx{01}, Bx{02}, ...
        movdqu  xtmph2, [tmp3+vec+16]   ;     "     Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
        movdqu  xtmpl3, [tmp3+2*vec]    ;Load array Cx{00}, Cx{01}, Cx{02}, ...
        movdqu  xtmph3, [tmp3+2*vec+16] ;     "     Cx{00}, Cx{10}, Cx{20}, ... , Cx{f0}
        XLDR    xd1, [dest1+pos]        ;Get next dest vector

        movdqa  xtmpa, x0               ;Keep unshifted copy of src
        psraw   x0, 4                   ;Shift to put high nibble into bits 3-0
        pand    x0, xmask0f             ;Mask high src nibble in bits 3-0
        pand    xtmpa, xmask0f          ;Mask low src nibble in bits 3-0

        pshufb  xtmph1, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl1, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph1, xtmpl1          ;GF add high and low partials
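        ;; GF(2^8) multiply by table lookup: the product of byte b with
        ;; the constant equals tbl_lo[b & 0x0f] XOR tbl_hi[b >> 4]; the
        ;; two pshufb ops do 16 such lookups at once.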
        XLDR    xd2, [dest2+pos]        ;reuse xtmpl1. Get next dest vector
        XLDR    xd3, [dest3+pos]        ;reuse xtmph1. Get next dest3 vector

        pshufb  xtmph2, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl2, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph2, xtmpl2          ;GF add high and low partials

        pshufb  xtmph3, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl3, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph3, xtmpl3          ;GF add high and low partials

        XSTR    [dest1+pos], xd1        ;Store result into dest1
        XSTR    [dest2+pos], xd2        ;Store result into dest2
        XSTR    [dest3+pos], xd3        ;Store result into dest3
        movdqa  xtmph1, xgft4_hi        ;Reload const array registers
        movdqa  xtmpl1, xgft4_lo        ;Reload const array registers
        movdqa  xtmph2, xgft5_hi        ;Reload const array registers
        movdqa  xtmpl2, xgft5_lo        ;Reload const array registers
        movdqa  xtmph3, xgft6_hi        ;Reload const array registers
        movdqa  xtmpl3, xgft6_lo        ;Reload const array registers
        XLDR    xd1, [dest4+pos]        ;Get next dest vector
        pshufb  xtmph1, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl1, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph1, xtmpl1          ;GF add high and low partials

        XLDR    xd2, [dest5+pos]        ;reuse xtmpl1. Get next dest vector
        XLDR    xd3, [dest6+pos]        ;reuse xtmph1. Get next dest vector

        pshufb  xtmph2, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl2, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph2, xtmpl2          ;GF add high and low partials

        pshufb  xtmph3, x0              ;Lookup mul table of high nibble
        pshufb  xtmpl3, xtmpa           ;Lookup mul table of low nibble
        pxor    xtmph3, xtmpl3          ;GF add high and low partials

        XSTR    [dest4+pos], xd1        ;Store result into dest4
        XSTR    [dest5+pos], xd2        ;Store result into dest5
        XSTR    [dest6+pos], xd3        ;Store result into dest6

        add     pos, 16                 ;Loop on 16 bytes at a time
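        ;; The main loop handles 16 bytes per pass; a remainder shorter
        ;; than 16 bytes falls through to the overlapped tail pass below.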
        ;; Do one more overlap pass
        ;; Overlapped offset length-16
        mov     tmp, len                ;Backup len as len=rdi

        XLDR    x0, [src+tmp]           ;Get next source vector
        XLDR    xd1, [dest4+tmp]        ;Get next dest vector
        XLDR    xd2, [dest5+tmp]        ;reuse xtmpl1. Get next dest vector
        XLDR    xd3, [dest6+tmp]        ;reuse xtmph1. Get next dest vector

        movdqa  xtmph3, [constip16]     ;Load const of i + 16
        pinsrb  xtmpl3, len.w, 15
        pshufb  xtmpl3, xmask0f         ;Broadcast len to all bytes
        pcmpgtb xtmpl3, xtmph3
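        ;; The tail re-processes the last 16 bytes of the buffer, part of
        ;; which the main loop already produced. The pcmpgtb result is a
        ;; per-byte mask that is all-ones only for the not-yet-processed
        ;; bytes; the pand ops below zero the GF product elsewhere, so
        ;; the xor into dest leaves the already-final bytes unchanged.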
        movdqa  xtmpa, x0               ;Keep unshifted copy of src
        psraw   x0, 4                   ;Shift to put high nibble into bits 3-0
        pand    x0, xmask0f             ;Mask high src nibble in bits 3-0
        pand    xtmpa, xmask0f          ;Mask low src nibble in bits 3-0

        pshufb  xgft4_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft4_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft4_hi, xgft4_lo      ;GF add high and low partials
        pand    xgft4_hi, xtmpl3

        pshufb  xgft5_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft5_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft5_hi, xgft5_lo      ;GF add high and low partials
        pand    xgft5_hi, xtmpl3

        pshufb  xgft6_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft6_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft6_hi, xgft6_lo      ;GF add high and low partials
        pand    xgft6_hi, xtmpl3

        XSTR    [dest4+tmp], xd1        ;Store result into dest4
        XSTR    [dest5+tmp], xd2        ;Store result into dest5
        XSTR    [dest6+tmp], xd3        ;Store result into dest6
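        ;; The xgft registers were consumed in place above, so reload
        ;; them with the tables for outputs 1-3 and run the same masked
        ;; multiply-add for the remaining three destinations.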
        movdqu  xgft4_lo, [tmp3]        ;Load array Ax{00}, Ax{01}, Ax{02}, ...
        movdqu  xgft4_hi, [tmp3+16]     ;     "     Ax{00}, Ax{10}, Ax{20}, ... , Ax{f0}
        movdqu  xgft5_lo, [tmp3+vec]    ;Load array Bx{00}, Bx{01}, Bx{02}, ...
        movdqu  xgft5_hi, [tmp3+vec+16] ;     "     Bx{00}, Bx{10}, Bx{20}, ... , Bx{f0}
        movdqu  xgft6_lo, [tmp3+2*vec]  ;Load array Cx{00}, Cx{01}, Cx{02}, ...
        movdqu  xgft6_hi, [tmp3+2*vec+16]       ;     "     Cx{00}, Cx{10}, Cx{20}, ... , Cx{f0}
        XLDR    xd1, [dest1+tmp]        ;Get next dest vector
        XLDR    xd2, [dest2+tmp]        ;reuse xtmpl1. Get next dest vector
        XLDR    xd3, [dest3+tmp]        ;reuse xtmph1. Get next dest3 vector

        pshufb  xgft4_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft4_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft4_hi, xgft4_lo      ;GF add high and low partials
        pand    xgft4_hi, xtmpl3

        pshufb  xgft5_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft5_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft5_hi, xgft5_lo      ;GF add high and low partials
        pand    xgft5_hi, xtmpl3

        pshufb  xgft6_hi, x0            ;Lookup mul table of high nibble
        pshufb  xgft6_lo, xtmpa         ;Lookup mul table of low nibble
        pxor    xgft6_hi, xgft6_lo      ;GF add high and low partials
        pand    xgft6_hi, xtmpl3

        XSTR    [dest1+tmp], xd1        ;Store result into dest1
        XSTR    [dest2+tmp], xd2        ;Store result into dest2
        XSTR    [dest3+tmp], xd3        ;Store result into dest3
mask0f:    dq 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f
constip16: dq 0xf8f9fafbfcfdfeff, 0xf0f1f2f3f4f5f6f7
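;; constip16 holds the signed bytes -1, -2, ..., -16; pcmpgtb against the
;; broadcast remaining length turns it into the partial-block write mask
;; used by the overlapped tail pass.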
;;; func        core, ver, snum
slversion gf_6vect_mad_sse, 00, 01, 020f