;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_2vect_mad_avx512(len, vec, vec_i, mul_array, src, dest);
;;;
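;;; Multiply-and-add of one source into two destinations over GF(2^8):
;;; dest[0][i] ^= A*src[i] and dest[1][i] ^= B*src[i], where A and B are the
;;; coding constants whose nibble lookup tables are held in mul_array (a
;;; short description derived from the code below):
;;;
;;;  len       - length of each buffer in bytes
;;;  vec       - number of source vectors (steps between the per-destination
;;;              table blocks in mul_array)
;;;  vec_i     - index of this source vector (selects its 32-byte table)
;;;  mul_array - GF(2^8) multiply tables, 32 bytes per source per destination
;;;  src       - pointer to the source buffer
;;;  dest      - array of two destination buffer pointers
;;;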

%include "reg_sizes.asm"

%ifdef HAVE_AS_KNOWS_AVX512

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9
 %define tmp    r11
 %define tmp2   r10
 %define return rax
 %define func(x) x:
 %define FUNC_SAVE
 %define FUNC_RESTORE
%endif

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9
 %define arg4   r12
 %define arg5   r15
 %define tmp    r11
 %define tmp2   r10
 %define return rax
 %define stack_size 16*9 + 3*8          ; must be an odd multiple of 8
 %define arg(x) [rsp + stack_size + PS + PS*x]

 %define func(x) proc_frame x
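 ;; Win64 passes only four integer args in registers, so args 5 and 6
 ;; (src and dest) are fetched from the caller's stack via arg().
 ;; xmm6-xmm14 (the callee-saved XMM registers this routine clobbers) plus
 ;; r12 and r15 are spilled by FUNC_SAVE and restored by FUNC_RESTORE.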
 %macro FUNC_SAVE 0
        sub     rsp, stack_size
        vmovdqa [rsp+16*0], xmm6
        vmovdqa [rsp+16*1], xmm7
        vmovdqa [rsp+16*2], xmm8
        vmovdqa [rsp+16*3], xmm9
        vmovdqa [rsp+16*4], xmm10
        vmovdqa [rsp+16*5], xmm11
        vmovdqa [rsp+16*6], xmm12
        vmovdqa [rsp+16*7], xmm13
        vmovdqa [rsp+16*8], xmm14
        save_reg r12, 9*16 + 0*8
        save_reg r15, 9*16 + 1*8
        end_prolog
        mov     arg4, arg(4)
        mov     arg5, arg(5)
 %endmacro

 %macro FUNC_RESTORE 0
        vmovdqa xmm6, [rsp+16*0]
        vmovdqa xmm7, [rsp+16*1]
        vmovdqa xmm8, [rsp+16*2]
        vmovdqa xmm9, [rsp+16*3]
        vmovdqa xmm10, [rsp+16*4]
        vmovdqa xmm11, [rsp+16*5]
        vmovdqa xmm12, [rsp+16*6]
        vmovdqa xmm13, [rsp+16*7]
        vmovdqa xmm14, [rsp+16*8]
        mov     r12, [rsp + 9*16 + 0*8]
        mov     r15, [rsp + 9*16 + 1*8]
        add     rsp, stack_size
 %endmacro
%endif

%define PS 8
%define len       arg0
%define len.w     arg0.w
%define vec       arg1
%define vec_i     arg2
%define mul_array arg3
%define src       arg4
%define dest1     arg5
%define pos       return
%define pos.w     return.w
%define dest2     tmp2

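;;; XLDR/XSTR select the 64-byte load/store flavor: unaligned loads/stores by
;;; default, or aligned / non-temporal (cache-bypassing) variants when
;;; EC_ALIGNED_ADDR is defined.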
%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
 %define XLDR vmovdqu8
 %define XSTR vmovdqu8
%else
;;; Use Non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif

default rel
[bits 64]
section .text

%define x0        zmm0
%define xtmpa     zmm1
%define xtmph1    zmm2
%define xtmpl1    zmm3
%define xtmph2    zmm4
%define xtmpl2    zmm5
%define xd1       zmm6
%define xd2       zmm7
%define xtmpd1    zmm8
%define xtmpd2    zmm9
%define xgft1_hi  zmm10
%define xgft1_lo  zmm11
%define xgft1_loy ymm11
%define xgft2_hi  zmm12
%define xgft2_lo  zmm13
%define xgft2_loy ymm13
%define xmask0f   zmm14

align 16
global gf_2vect_mad_avx512:function
func(gf_2vect_mad_avx512)
        FUNC_SAVE
        sub     len, 64
        jl      .return_fail
        xor     pos, pos
        mov     tmp, 0x0f
        vpbroadcastb xmask0f, tmp       ;Construct mask 0x0f0f0f...
        sal     vec_i, 5                ;Multiply by 32
        sal     vec, 5
        lea     tmp, [mul_array + vec_i]
        vmovdqu xgft1_loy, [tmp]        ;Load array Ax{00}..{0f}, Ax{00}..{f0}
        vmovdqu xgft2_loy, [tmp+vec]    ;Load array Bx{00}..{0f}, Bx{00}..{f0}
        vshufi64x2 xgft1_hi, xgft1_lo, xgft1_lo, 0x55
        vshufi64x2 xgft1_lo, xgft1_lo, xgft1_lo, 0x00
        vshufi64x2 xgft2_hi, xgft2_lo, xgft2_lo, 0x55
        vshufi64x2 xgft2_lo, xgft2_lo, xgft2_lo, 0x00
        mov     dest2, [dest1+PS]       ;Load second dest pointer
        mov     dest1, [dest1]
        mov     tmp, -1
        kmovq   k1, tmp                 ;Full 64-byte mask for whole blocks

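;; Main loop: multiply 64 source bytes by constants A and B in GF(2^8) and
;; XOR the products into the two destinations.  Each byte product comes from
;; two 16-entry table lookups: the low nibble indexes the *_lo table and the
;; high nibble the *_hi table, vpshufb performs all 64 lookups in parallel,
;; and XORing the two partial products yields the full GF(2^8) product.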
.loop64:
        XLDR    xd1, [dest1+pos]        ;Get next dest vector
        XLDR    xd2, [dest2+pos]        ;Get next dest vector
        XLDR    x0, [src+pos]           ;Get next source vector

        vpandq  xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpandq  x0, x0, xmask0f         ;Mask high src nibble in bits 4-0

        vpshufb xtmph1 {k1}{z}, xgft1_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl1 {k1}{z}, xgft1_lo, xtmpa ;Lookup mul table of low nibble
        vpxorq  xtmph1, xtmph1, xtmpl1          ;GF add high and low partials
        vpxorq  xd1, xd1, xtmph1                ;xd1 += partial

        vpshufb xtmph2 {k1}{z}, xgft2_hi, x0    ;Lookup mul table of high nibble
        vpshufb xtmpl2 {k1}{z}, xgft2_lo, xtmpa ;Lookup mul table of low nibble
        vpxorq  xtmph2, xtmph2, xtmpl2          ;GF add high and low partials
        vpxorq  xd2, xd2, xtmph2                ;xd2 += partial

        XSTR    [dest1+pos], xd1
        XSTR    [dest2+pos], xd2

        add     pos, 64                 ;Loop on 64 bytes at a time
        cmp     pos, len
        jle     .loop64

        lea     tmp, [len + 64]
        cmp     pos, tmp
        je      .return_pass

        ;; Tail len
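        ;; Build a k-mask whose top bits cover just the unprocessed tail bytes:
        ;; (1 << 63) shifted right arithmetically by ((length-1) mod 64) sets
        ;; exactly (length mod 64) high bits.  The overlapped final pass then
        ;; reloads the last 64 bytes at offset length-64; the {z} masking on
        ;; vpshufb zeroes the partial products for bytes already processed, so
        ;; XORing them into the reloaded dest leaves those bytes unchanged.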
        mov     pos, (1 << 63)
        lea     tmp, [len + 64 - 1]
        and     tmp, 63
        sarx    pos, pos, tmp
        kmovq   k1, pos
        mov     pos, len                ;Overlapped offset length-64
        jmp     .loop64                 ;Do one more overlap pass

.return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret

.return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame

%else
%ifidn __OUTPUT_FORMAT__, win64
global no_gf_2vect_mad_avx512
no_gf_2vect_mad_avx512:
%endif
%endif  ; ifdef HAVE_AS_KNOWS_AVX512