;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; gf_vect_mad_avx(len, vec, vec_i, mul_array, src, dest);

%include "reg_sizes.asm"
%ifidn __OUTPUT_FORMAT__, win64
 %define stack_size  16*3 + 3*8		; room for 3 xmm saves + 3 qword slots
 %define arg(x)      [rsp + stack_size + PS + PS*x]
 %define func(x)     proc_frame x

	;; frame save: preserve the non-volatile registers this routine uses
	vmovdqa	[rsp+16*0], xmm6
	vmovdqa	[rsp+16*1], xmm7
	vmovdqa	[rsp+16*2], xmm8
	save_reg	r12, 3*16 + 0*8
	save_reg	r15, 3*16 + 1*8

	;; frame restore: undo the saves above before returning
	vmovdqa	xmm6, [rsp+16*0]
	vmovdqa	xmm7, [rsp+16*1]
	vmovdqa	xmm8, [rsp+16*2]
	mov	r12, [rsp + 3*16 + 0*8]
	mov	r15, [rsp + 3*16 + 1*8]
%elifidn __OUTPUT_FORMAT__, elf64

;;; gf_vect_mad_avx(len, vec, vec_i, mul_array, src, dest)
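;;; Argument summary (descriptive comment, inferred from the calling
;;; convention and from how the values are used below):
;;;   len       - length of each vector, in bytes
;;;   vec       - number of source vectors in the encode group
;;;   vec_i     - index of this source vector; selects its 32-byte table
;;;               pair inside mul_array (see the sal below)
;;;   mul_array - precomputed GF(2^8) multiply tables, 32 bytes per vector
;;;   src       - source buffer
;;;   dest      - destination buffer, updated as dest ^= C * src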
%define mul_array arg3
%define pos.w     return.w
%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Use non-temporal load/store
 %define XLDR vmovntdqa
 %define XSTR vmovntdq
%endif
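;; Note: vmovntdqa/vmovntdq are the non-temporal (cache-bypassing) load
;; and store forms; both require 16-byte-aligned addresses, hence the
;; EC_ALIGNED_ADDR gate above. They can reduce cache pollution when the
;; buffers are much larger than the last-level cache.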
global gf_vect_mad_avx:function
func(gf_vect_mad_avx)
	vmovdqa	xmask0f, [mask0f]	;Load mask of lower nibble in each byte

	sal	vec_i, 5		;Multiply by 32: one 32-byte table pair per vector
	vmovdqu	xgft_lo, [vec_i+mul_array]	;Load array Cx{00}, Cx{01}, Cx{02}, ...
	vmovdqu	xgft_hi, [vec_i+mul_array+16]	; "     Cx{00}, Cx{10}, Cx{20}, ..., Cx{f0}
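;; mul_array layout (as used here): each source vector owns two
;; consecutive 16-byte tables. xgft_lo[i] holds C*i (products of the
;; low nibble) and xgft_hi[i] holds C*(i << 4) (products of the high
;; nibble), where C is this vector's coefficient. The vpshufb lookups
;; in the loop index these tables with each source byte's nibbles.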
	XLDR	xtmpd, [dest+len]	;backup the last 16 bytes in dest
.loop16:
	XLDR	xd, [dest+pos]		;Get next dest vector
.loop16_overlap:
	XLDR	x0, [src+pos]		;Get next source vector

	vpand	xtmpa, x0, xmask0f	;Mask low src nibble in bits 4-0
	vpsraw	x0, x0, 4		;Shift to put high nibble into bits 4-0
	vpand	x0, x0, xmask0f		;Mask high src nibble in bits 4-0

	vpshufb	xtmph, xgft_hi, x0	;Lookup mul table of high nibble
	vpshufb	xtmpl, xgft_lo, xtmpa	;Lookup mul table of low nibble
	vpxor	xtmph, xtmph, xtmpl	;GF add high and low partials
	vpxor	xd, xd, xtmph		;xd += partial
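;; The block above is a 16-way parallel GF(2^8) multiply-add. Because
;; GF(2^8) multiplication distributes over XOR, C*b splits into
;; C*(b & 0x0f) ^ C*(b & 0xf0); each half is a 16-entry table lookup,
;; and vpshufb performs that lookup on all 16 bytes at once. A scalar
;; sketch of the same update (illustrative C-style pseudocode, not part
;; of the original source):
;;
;;   for (i = 0; i < len; i++) {
;;       uint8_t b = src[i];
;;       dest[i] ^= gft_lo[b & 0x0f] ^ gft_hi[b >> 4];
;;   }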
	add	pos, 16			;Loop on 16 bytes at a time
	mov	pos, len		;Overlapped offset length-16
	vmovdqa	xd, xtmpd		;Restore xd
	jmp	.loop16_overlap		;Do one more overlap pass
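;; Tail handling: when len is not a multiple of 16, one extra pass runs
;; over the final 16 bytes at offset len-16. xd is reloaded from xtmpd,
;; the copy of dest's last 16 bytes taken before the loop, so bytes
;; already updated by the last full iteration are not multiply-added a
;; second time (a second XOR of the same partial would cancel the first).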
mask0f:	ddq 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f
;;;       func             core, ver, snum
slversion gf_vect_mad_avx, 02,   01,  0201