;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_3vect_dot_prod_avx512(len, vec, *g_tbls, **buffs, **dests);
;;;

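;;; A minimal C-style sketch of what this routine computes (illustrative
;;; only; gf_mul() is a hypothetical helper, not the ISA-L API):
;;;
;;;     for (d = 0; d < 3; d++)
;;;             for (i = 0; i < len; i++) {
;;;                     dests[d][i] = 0;
;;;                     for (j = 0; j < vec; j++)
;;;                             dests[d][i] ^= gf_mul(coef[d][j], buffs[j][i]);
;;;             }
;;;
;;; where each coefficient coef[d][j] is encoded in g_tbls as a pair of
;;; 16-byte nibble lookup tables (see the xgft*_loy loads below).
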
%include "reg_sizes.asm"

%ifdef HAVE_AS_KNOWS_AVX512

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0   rdi
 %define arg1   rsi
 %define arg2   rdx
 %define arg3   rcx
 %define arg4   r8
 %define arg5   r9

 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define tmp2   r10
 %define tmp3   r13             ; must be saved and restored
 %define tmp4   r12             ; must be saved and restored
 %define return rax
 %define PS     8
 %define LOG_PS 3

 %define func(x) x:
 %macro FUNC_SAVE 0
        push    r12
        push    r13
 %endmacro
 %macro FUNC_RESTORE 0
        pop     r13
        pop     r12
 %endmacro
%endif

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9

 %define arg4   r12             ; must be saved, loaded and restored
 %define arg5   r15             ; must be saved and restored
 %define tmp    r11
 %define tmp.w  r11d
 %define tmp.b  r11b
 %define tmp2   r10
 %define tmp3   r13             ; must be saved and restored
 %define tmp4   r14             ; must be saved and restored
 %define return rax
 %define PS     8
 %define LOG_PS 3
 %define stack_size 9*16 + 5*8  ; must be an odd multiple of 8
 %define arg(x) [rsp + stack_size + PS + PS*x]

 %define func(x) proc_frame x
 %macro FUNC_SAVE 0
        alloc_stack     stack_size
        vmovdqa [rsp + 0*16], xmm6
        vmovdqa [rsp + 1*16], xmm7
        vmovdqa [rsp + 2*16], xmm8
        vmovdqa [rsp + 3*16], xmm9
        vmovdqa [rsp + 4*16], xmm10
        vmovdqa [rsp + 5*16], xmm11
        vmovdqa [rsp + 6*16], xmm12
        vmovdqa [rsp + 7*16], xmm13
        vmovdqa [rsp + 8*16], xmm14
        save_reg        r12, 9*16 + 0*8
        save_reg        r13, 9*16 + 1*8
        save_reg        r14, 9*16 + 2*8
        save_reg        r15, 9*16 + 3*8
        end_prolog
        mov     arg4, arg(4)
 %endmacro

 %macro FUNC_RESTORE 0
        vmovdqa xmm6, [rsp + 0*16]
        vmovdqa xmm7, [rsp + 1*16]
        vmovdqa xmm8, [rsp + 2*16]
        vmovdqa xmm9, [rsp + 3*16]
        vmovdqa xmm10, [rsp + 4*16]
        vmovdqa xmm11, [rsp + 5*16]
        vmovdqa xmm12, [rsp + 6*16]
        vmovdqa xmm13, [rsp + 7*16]
        vmovdqa xmm14, [rsp + 8*16]
        mov     r12, [rsp + 9*16 + 0*8]
        mov     r13, [rsp + 9*16 + 1*8]
        mov     r14, [rsp + 9*16 + 2*8]
        mov     r15, [rsp + 9*16 + 3*8]
        add     rsp, stack_size
 %endmacro
%endif
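
;;; Note on the win64 frame above: the first 9*16 bytes hold the
;;; non-volatile xmm6-xmm14, the next 4*8 bytes hold r12-r15, and the
;;; spare 8-byte slot keeps stack_size an odd multiple of 8, so rsp is
;;; 16-byte aligned again after the call pushed an 8-byte return address
;;; (the vmovdqa saves require aligned slots).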

%define len       arg0
%define vec       arg1
%define mul_array arg2
%define src       arg3
%define dest1     arg4
%define ptr       arg5
%define vec_i     tmp2
%define dest2     tmp3
%define dest3     tmp4
%define pos       return

%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
 %define XLDR vmovdqu8
 %define XSTR vmovdqu8
%else
;;; Use Non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
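
;;; XLDR/XSTR notes (background, not from this file): the vmovdqu8
;;; forms tolerate unaligned addresses; vmovdqa and the non-temporal
;;; vmovntdqa/vmovntdq forms require naturally aligned (here 64-byte)
;;; operands, and the non-temporal forms hint the data past the caches.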

%define xmask0f   zmm11
%define xgft1_lo  zmm10
%define xgft1_loy ymm10
%define xgft1_hi  zmm9
%define xgft2_lo  zmm8
%define xgft2_loy ymm8
%define xgft2_hi  zmm7
%define xgft3_lo  zmm6
%define xgft3_loy ymm6
%define xgft3_hi  zmm5

%define x0    zmm0
%define xtmpa zmm1
%define xp1   zmm2
%define xp2   zmm3
%define xp3   zmm4

default rel
[bits 64]

section .text

align 16
global gf_3vect_dot_prod_avx512:ISAL_SYM_TYPE_FUNCTION
func(gf_3vect_dot_prod_avx512)
        FUNC_SAVE
        sub     len, 64
        jl      .return_fail

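        ;; At this point len holds length-64: lengths under 64 bytes
        ;; were rejected above (the function returns 1), and .loop64
        ;; below uses len as its inclusive upper bound for pos.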
        xor     pos, pos
        mov     tmp, 0x0f
        vpbroadcastb xmask0f, tmp.b     ;Construct mask 0x0f0f0f...
        sal     vec, LOG_PS             ;vec *= PS. Make vec_i count by PS
        mov     dest2, [dest1+PS]
        mov     dest3, [dest1+2*PS]
        mov     dest1, [dest1]

.loop64:
        vpxorq  xp1, xp1, xp1
        vpxorq  xp2, xp2, xp2
        vpxorq  xp3, xp3, xp3
        mov     tmp, mul_array
        xor     vec_i, vec_i

.next_vect:
        mov     ptr, [src+vec_i]
        XLDR    x0, [ptr+pos]           ;Get next source vector
        add     vec_i, PS

        vpandq  xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpandq  x0, x0, xmask0f         ;Mask high src nibble in bits 4-0
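
        ;; Nibble technique: multiplying a byte by a GF(2^8) constant is
        ;; done as two 16-entry table lookups, one per nibble. A rough C
        ;; sketch (gf_mul_sketch is a hypothetical helper, not an ISA-L
        ;; API):
        ;;
        ;;     unsigned char gf_mul_sketch(const unsigned char tbl[32],
        ;;                                 unsigned char b)
        ;;     {
        ;;             return tbl[b & 0x0f] ^ tbl[16 + (b >> 4)];
        ;;     }
        ;;
        ;; Note vpsraw shifts 16-bit lanes, so high-byte bits bleed into
        ;; the low byte of each word pair; the vpandq with 0x0f masks
        ;; them off again. AVX-512 has no per-byte shift, hence the
        ;; word shift.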

        vmovdqu8 xgft1_loy, [tmp]               ;Load array Ax{00}..{0f}, Ax{00}..{f0}
        vmovdqu8 xgft2_loy, [tmp+vec*(32/PS)]   ;Load array Bx{00}..{0f}, Bx{00}..{f0}
        vmovdqu8 xgft3_loy, [tmp+vec*(64/PS)]   ;Load array Cx{00}..{0f}, Cx{00}..{f0}
        add     tmp, 32
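        ;; g_tbls layout: each (source, destination) coefficient gets 32
        ;; bytes, the low-nibble table then the high-nibble table. vec
        ;; was scaled by PS above, so vec*(32/PS) is the byte span of
        ;; one destination's tables: B's block follows all of A's and
        ;; C's follows B's, while `add tmp, 32` steps to the next source.
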
        vshufi64x2 xgft1_hi, xgft1_lo, xgft1_lo, 0x55
        vshufi64x2 xgft1_lo, xgft1_lo, xgft1_lo, 0x00
        vshufi64x2 xgft2_hi, xgft2_lo, xgft2_lo, 0x55
        vshufi64x2 xgft2_lo, xgft2_lo, xgft2_lo, 0x00
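        ;; The 32-byte ymm load left the low-nibble table in 128-bit
        ;; lane 0 and the high-nibble table in lane 1 of the zmm.
        ;; vshufi64x2 with imm 0x00 broadcasts lane 0 to all four lanes
        ;; and imm 0x55 broadcasts lane 1, so one vpshufb can translate
        ;; all 64 source bytes at once.
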
        vpshufb xgft1_hi, xgft1_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft1_lo, xgft1_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft1_hi, xgft1_hi, xgft1_lo    ;GF add high and low partials
        vpxorq  xp1, xp1, xgft1_hi              ;xp1 += partial

        vpshufb xgft2_hi, xgft2_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xgft2_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft2_hi, xgft2_hi, xgft2_lo    ;GF add high and low partials
        vpxorq  xp2, xp2, xgft2_hi              ;xp2 += partial

        vshufi64x2 xgft3_hi, xgft3_lo, xgft3_lo, 0x55
        vshufi64x2 xgft3_lo, xgft3_lo, xgft3_lo, 0x00

        vpshufb xgft3_hi, xgft3_hi, x0          ;Lookup mul table of high nibble
        vpshufb xgft3_lo, xgft3_lo, xtmpa       ;Lookup mul table of low nibble
        vpxorq  xgft3_hi, xgft3_hi, xgft3_lo    ;GF add high and low partials
        vpxorq  xp3, xp3, xgft3_hi              ;xp3 += partial

        cmp     vec_i, vec
        jl      .next_vect

        XSTR    [dest1+pos], xp1
        XSTR    [dest2+pos], xp2
        XSTR    [dest3+pos], xp3

        add     pos, 64                 ;Loop on 64 bytes at a time
        cmp     pos, len
        jle     .loop64

        lea     tmp, [len + 64]
        cmp     pos, tmp
        je      .return_pass

        ;; Tail len
        mov     pos, len                ;Overlapped offset length-64
        jmp     .loop64                 ;Do one more overlap pass

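        ;; Tail handling: when the length is not a multiple of 64, the
        ;; final pass re-runs the whole 64-byte loop at offset
        ;; length-64, overlapping bytes already written. That is safe
        ;; because every pass recomputes its destinations from scratch
        ;; (xp1-xp3 are zeroed at .loop64), so the overlapped bytes are
        ;; simply rewritten with the same values.
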
.return_pass:
        mov     return, 0
        FUNC_RESTORE
        ret

.return_fail:
        mov     return, 1
        FUNC_RESTORE
        ret

endproc_frame

%else
%ifidn __OUTPUT_FORMAT__, win64
global no_gf_3vect_dot_prod_avx512
no_gf_3vect_dot_prod_avx512:
%endif
%endif  ; ifdef HAVE_AS_KNOWS_AVX512