;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2016 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%ifndef BUFFER_UTILS
%define BUFFER_UTILS

%include "options.asm"

extern pshufb_shf_table
extern mask3

%ifdef FIX_CACHE_READ
%define vmovntdqa vmovdqa
%else
%macro prefetchnta 1
%endm
%endif
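
; Note: when FIX_CACHE_READ is defined, the non-temporal loads below turn
; into ordinary aligned loads (vmovntdqa -> vmovdqa); otherwise prefetchnta
; is redefined as an empty macro, so any prefetch hints assemble to nothing.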

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; code for doing the CRC calculation as part of copy-in, using pclmulqdq

; "shift" 4 input registers down 4 places
; macro FOLD4 xmm0, xmm1, xmm2, xmm3, const, tmp0, tmp1
%macro FOLD4 7
%define %%xmm0   %1      ; xmm reg, in/out
%define %%xmm1   %2      ; xmm reg, in/out
%define %%xmm2   %3      ; xmm reg, in/out
%define %%xmm3   %4      ; xmm reg, in/out
%define %%const  %5      ; xmm reg, in
%define %%tmp0   %6      ; xmm reg, tmp
%define %%tmp1   %7      ; xmm reg, tmp

        vmovaps %%tmp0, %%xmm0
        vmovaps %%tmp1, %%xmm1

        vpclmulqdq %%xmm0, %%const, 0x01
        vpclmulqdq %%xmm1, %%const, 0x01

        vpclmulqdq %%tmp0, %%const, 0x10
        vpclmulqdq %%tmp1, %%const, 0x10

        vxorps %%xmm0, %%tmp0
        vxorps %%xmm1, %%tmp1


        vmovaps %%tmp0, %%xmm2
        vmovaps %%tmp1, %%xmm3

        vpclmulqdq %%xmm2, %%const, 0x01
        vpclmulqdq %%xmm3, %%const, 0x01

        vpclmulqdq %%tmp0, %%const, 0x10
        vpclmulqdq %%tmp1, %%const, 0x10

        vxorps %%xmm2, %%tmp0
        vxorps %%xmm3, %%tmp1
%endm
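
; Each fold step above is a standard pclmulqdq CRC fold: the 0x01 product
; multiplies a chunk's high quadword by the low half of %%const, the 0x10
; product multiplies its low quadword by the high half, and the xor of the
; two 128-bit products is that chunk carried 64 bytes forward in the CRC
; polynomial field. The fold constants (of the form x^t mod P) are loaded
; by the caller; this file only applies them.
;
; Illustrative invocation (the register choice here is hypothetical):
;       FOLD4   xmm0, xmm1, xmm2, xmm3, xmm15, xmm8, xmm9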

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; "shift" 3 input registers down 4 places
; macro FOLD3 x0, x1, x2, x3, const, tmp0
;      x0  x1  x2  x3
; In    A   B   C   D
; Out   D   A'  B'  C'
%macro FOLD3 6
%define %%x0     %1  ; xmm reg, in/out
%define %%x1     %2  ; xmm reg, in/out
%define %%x2     %3  ; xmm reg, in/out
%define %%x3     %4  ; xmm reg, in/out
%define %%const  %5  ; xmm reg, in
%define %%tmp0   %6  ; xmm reg, tmp

        vmovdqa %%tmp0, %%x3

        vmovaps %%x3, %%x2
        vpclmulqdq %%x2, %%const, 0x01
        vpclmulqdq %%x3, %%const, 0x10
        vxorps %%x3, %%x2

        vmovaps %%x2, %%x1
        vpclmulqdq %%x1, %%const, 0x01
        vpclmulqdq %%x2, %%const, 0x10
        vxorps %%x2, %%x1

        vmovaps %%x1, %%x0
        vpclmulqdq %%x0, %%const, 0x01
        vpclmulqdq %%x1, %%const, 0x10
        vxorps %%x1, %%x0

        vmovdqa %%x0, %%tmp0
%endm

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; "shift" 2 input registers down 4 places
; macro FOLD2 x0, x1, x2, x3, const, tmp0
;      x0  x1  x2  x3
; In    A   B   C   D
; Out   C   D   A'  B'
%macro FOLD2 6
%define %%x0     %1  ; xmm reg, in/out
%define %%x1     %2  ; xmm reg, in/out
%define %%x2     %3  ; xmm reg, in/out
%define %%x3     %4  ; xmm reg, in/out
%define %%const  %5  ; xmm reg, in
%define %%tmp0   %6  ; xmm reg, tmp

        vmovdqa %%tmp0, %%x3

        vmovaps %%x3, %%x1
        vpclmulqdq %%x1, %%const, 0x01
        vpclmulqdq %%x3, %%const, 0x10
        vxorps %%x3, %%x1

        vmovdqa %%x1, %%tmp0
        vmovdqa %%tmp0, %%x2

        vmovaps %%x2, %%x0
        vpclmulqdq %%x0, %%const, 0x01
        vpclmulqdq %%x2, %%const, 0x10
        vxorps %%x2, %%x0

        vmovdqa %%x0, %%tmp0
%endm

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; "shift" 1 input register down 4 places
; macro FOLD1 x0, x1, x2, x3, const, tmp0
;      x0  x1  x2  x3
; In    A   B   C   D
; Out   B   C   D   A'
%macro FOLD1 6
%define %%x0     %1  ; xmm reg, in/out
%define %%x1     %2  ; xmm reg, in/out
%define %%x2     %3  ; xmm reg, in/out
%define %%x3     %4  ; xmm reg, in/out
%define %%const  %5  ; xmm reg, in
%define %%tmp0   %6  ; xmm reg, tmp

        vmovdqa %%tmp0, %%x3

        vmovaps %%x3, %%x0
        vpclmulqdq %%x0, %%const, 0x01
        vpclmulqdq %%x3, %%const, 0x10
        vxorps %%x3, %%x0

        vmovdqa %%x0, %%x1
        vmovdqa %%x1, %%x2
        vmovdqa %%x2, %%tmp0
%endm
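
; FOLD3/FOLD2/FOLD1 fold only the oldest 3, 2 or 1 chunks (those about to
; be displaced by incoming data) and rotate the untouched registers down,
; so x0..x3 always hold the most recent 64 bytes in stream order.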

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; macro PARTIAL_FOLD x0, x1, x2, x3, xp, size, xfold, xt0, xt1, xt2, xt3

;                  XP  X3  X2  X1  X0  tmp2
; Initial state    xI  HG  FE  DC  BA
; after shift          IH  GF  ED  CB  A0
; after fold           ff  GF  ED  CB      ff = merge(IH, A0)
;
%macro PARTIAL_FOLD 12
%define %%x0     %1   ; xmm reg, in/out
%define %%x1     %2   ; xmm reg, in/out
%define %%x2     %3   ; xmm reg, in/out
%define %%x3     %4   ; xmm reg, in/out
%define %%xp     %5   ; xmm partial reg, in/clobbered
%define %%size   %6   ; GPR, in/clobbered (1...15)
%define %%const  %7   ; xmm reg, in
%define %%shl    %8   ; xmm reg, tmp
%define %%shr    %9   ; xmm reg, tmp
%define %%tmp2   %10  ; xmm reg, tmp
%define %%tmp3   %11  ; xmm reg, tmp
%define %%gtmp   %12  ; GPR, tmp

        ; {XP X3 X2 X1 X0} = {xI HG FE DC BA}
        shl %%size, 4                            ; size *= 16
        lea %%gtmp, [pshufb_shf_table - 16 WRT_OPT]
        vmovdqa %%shl, [%%gtmp + %%size]         ; shl constant
        vmovdqa %%shr, %%shl
        vpxor %%shr, [mask3 WRT_OPT]             ; shr constant
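
        ; xoring the shl pshufb control with mask3 (expected to be 0x80 in
        ; every byte, as the companion constant to pshufb_shf_table) flips
        ; each index's zeroing bit, turning the left-shift shuffle into the
        ; complementary right-shift that keeps exactly the bytes shl drops.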

        vmovdqa %%tmp2, %%x0            ; tmp2 = BA
        vpshufb %%tmp2, %%shl           ; tmp2 = A0

        vpshufb %%x0, %%shr             ; x0 = 0B
        vmovdqa %%tmp3, %%x1            ; tmp3 = DC
        vpshufb %%tmp3, %%shl           ; tmp3 = C0
        vpor    %%x0, %%tmp3            ; x0 = CB

        vpshufb %%x1, %%shr             ; x1 = 0D
        vmovdqa %%tmp3, %%x2            ; tmp3 = FE
        vpshufb %%tmp3, %%shl           ; tmp3 = E0
        vpor    %%x1, %%tmp3            ; x1 = ED

        vpshufb %%x2, %%shr             ; x2 = 0F
        vmovdqa %%tmp3, %%x3            ; tmp3 = HG
        vpshufb %%tmp3, %%shl           ; tmp3 = G0
        vpor    %%x2, %%tmp3            ; x2 = GF

        vpshufb %%x3, %%shr             ; x3 = 0H
        vpshufb %%xp, %%shl             ; xp = I0
        vpor    %%x3, %%xp              ; x3 = IH

        ; fold tmp2 into X3
        vmovaps %%tmp3, %%tmp2
        vpclmulqdq %%tmp2, %%const, 0x01
        vpclmulqdq %%tmp3, %%const, 0x10
        vxorps  %%x3, %%tmp2
        vxorps  %%x3, %%tmp3
%endm
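
; Net effect: the 64-byte state plus the partial chunk is shifted down by
; %%size bytes so it stays byte-aligned with the data stream, and the bytes
; pushed out the bottom (tmp2) are folded back into x3 with the same
; two-product pclmul step the FOLD macros use.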


;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; LOAD_FRACTIONAL_XMM: Packs xmm register with data when data input is less than 16 bytes.
; Returns a zeroed xmm_out if size is 0.
; Input: The input data (src), that data's length (size).
; Output: The packed xmm register (xmm_out).
; size is clobbered.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%macro LOAD_FRACTIONAL_XMM 3
%define %%xmm_out  %1  ; %%xmm_out is an xmm register
%define %%src      %2
%define %%size     %3

        vpxor %%xmm_out, %%xmm_out

        cmp %%size, 0
        je %%_done

        add %%src, %%size

        cmp %%size, 8
        jl %%_byte_loop

        sub %%src, 8
        vpinsrq %%xmm_out, [%%src], 0   ; read in 8 bytes if they exist
        sub %%size, 8

        je %%_done

%%_byte_loop:           ; read in data 1 byte at a time while data is left
        vpslldq %%xmm_out, 1

        dec %%src
        vpinsrb %%xmm_out, BYTE [%%src], 0
        dec %%size

        jg %%_byte_loop

%%_done:

%endmacro ; LOAD_FRACTIONAL_XMM
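
; Illustrative call (registers hypothetical): with rax pointing at the 5
; bytes "abcde" and rcx = 5,
;       LOAD_FRACTIONAL_XMM xmm1, rax, rcx
; leaves "abcde" in the low 5 bytes of xmm1 in memory order. Bytes are
; gathered backwards from src+size, so the macro never reads at or beyond
; src+size, which makes it safe at the very end of a buffer.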

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; copy x bytes (rounded up to 16 bytes) from src to dst
; src & dst are unaligned
; macro COPY_IN_CRC dst, src, size_in_bytes, tmp, x0, x1, x2, x3, xfold,
;                   xt0, xt1, xt2, xt3, xt4
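;
; With DEFLATE defined, every %ifndef DEFLATE block below is compiled out
; and the macro reduces to a plain one-pass copy; the CRC state in x0..x3
; is only maintained in the non-DEFLATE (checksummed) build.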
%macro COPY_IN_CRC 14
%define %%dst    %1   ; reg, in/clobbered
%define %%src    %2   ; reg, in/clobbered
%define %%size   %3   ; reg, in/clobbered
%define %%tmp    %4   ; reg, tmp
%define %%x0     %5   ; xmm, in/out: crc state
%define %%x1     %6   ; xmm, in/out: crc state
%define %%x2     %7   ; xmm, in/out: crc state
%define %%x3     %8   ; xmm, in/out: crc state
%define %%xfold  %9   ; xmm, in: (loaded from fold4)
%define %%xtmp0  %10  ; xmm, tmp
%define %%xtmp1  %11  ; xmm, tmp
%define %%xtmp2  %12  ; xmm, tmp
%define %%xtmp3  %13  ; xmm, tmp
%define %%xtmp4  %14  ; xmm, tmp

        cmp %%size, 16
        jl %%lt_16

        ; align source
        xor %%tmp, %%tmp
        sub %%tmp, %%src
        and %%tmp, 15
        jz %%already_aligned

        ; need to align, tmp contains number of bytes to transfer
        vmovdqu %%xtmp0, [%%src]
        vmovdqu [%%dst], %%xtmp0
        add %%dst, %%tmp
        add %%src, %%tmp
        sub %%size, %%tmp
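        ; %%tmp = (-src) & 15 is the distance to the next 16-byte boundary;
        ; a full 16 unaligned bytes were copied but the pointers advance by
        ; only %%tmp, so the overlap is rewritten with identical data and
        ; every later vmovntdqa sees an aligned source.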

%ifndef DEFLATE
        push %%dst

        PARTIAL_FOLD %%x0, %%x1, %%x2, %%x3, %%xtmp0, %%tmp, %%xfold, \
                     %%xtmp1, %%xtmp2, %%xtmp3, %%xtmp4, %%dst
        pop %%dst
%endif

%%already_aligned:
        sub %%size, 64
        jl %%end_loop
        jmp %%loop
align 16
%%loop:
        vmovntdqa %%xtmp0, [%%src+0*16]
        vmovntdqa %%xtmp1, [%%src+1*16]
        vmovntdqa %%xtmp2, [%%src+2*16]

%ifndef DEFLATE
        FOLD4 %%x0, %%x1, %%x2, %%x3, %%xfold, %%xtmp3, %%xtmp4
%endif
        vmovntdqa %%xtmp3, [%%src+3*16]

        vmovdqu [%%dst+0*16], %%xtmp0
        vmovdqu [%%dst+1*16], %%xtmp1
        vmovdqu [%%dst+2*16], %%xtmp2
        vmovdqu [%%dst+3*16], %%xtmp3

%ifndef DEFLATE
        vpxor %%x0, %%xtmp0
        vpxor %%x1, %%xtmp1
        vpxor %%x2, %%xtmp2
        vpxor %%x3, %%xtmp3
%endif
        add %%src, 4*16
        add %%dst, 4*16
        sub %%size, 4*16
        jge %%loop
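
        ; %%size stays biased by -64 through the loop, so going negative
        ; means fewer than 64 bytes remain; %%end_loop then adds 16 back
        ; per test to count how many full 16-byte registers are left.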

%%end_loop:
        ; %%size contains (num bytes left - 64)
        add %%size, 16
        jge %%three_full_regs
        add %%size, 16
        jge %%two_full_regs
        add %%size, 16
        jge %%one_full_reg
        add %%size, 16

%%no_full_regs:         ; 0 <= %%size < 16, no full regs
        jz %%done       ; if no bytes left, we're done
        jmp %%partial

;; Handle case where input is <16 bytes
%%lt_16:
        test %%size, %%size
        jz %%done       ; if no bytes left, we're done
        jmp %%partial


%%one_full_reg:
        vmovntdqa %%xtmp0, [%%src+0*16]

%ifndef DEFLATE
        FOLD1 %%x0, %%x1, %%x2, %%x3, %%xfold, %%xtmp3
%endif
        vmovdqu [%%dst+0*16], %%xtmp0

%ifndef DEFLATE
        vpxor %%x3, %%xtmp0
%endif
        test %%size, %%size
        jz %%done       ; if no bytes left, we're done

        add %%dst, 1*16
        add %%src, 1*16
        jmp %%partial


%%two_full_regs:
        vmovntdqa %%xtmp0, [%%src+0*16]
        vmovntdqa %%xtmp1, [%%src+1*16]

%ifndef DEFLATE
        FOLD2 %%x0, %%x1, %%x2, %%x3, %%xfold, %%xtmp3
%endif
        vmovdqu [%%dst+0*16], %%xtmp0
        vmovdqu [%%dst+1*16], %%xtmp1

%ifndef DEFLATE
        vpxor %%x2, %%xtmp0
        vpxor %%x3, %%xtmp1
%endif
        test %%size, %%size
        jz %%done       ; if no bytes left, we're done

        add %%dst, 2*16
        add %%src, 2*16
        jmp %%partial


%%three_full_regs:
        vmovntdqa %%xtmp0, [%%src+0*16]
        vmovntdqa %%xtmp1, [%%src+1*16]
        vmovntdqa %%xtmp2, [%%src+2*16]

%ifndef DEFLATE
        FOLD3 %%x0, %%x1, %%x2, %%x3, %%xfold, %%xtmp3
%endif
        vmovdqu [%%dst+0*16], %%xtmp0
        vmovdqu [%%dst+1*16], %%xtmp1
        vmovdqu [%%dst+2*16], %%xtmp2

%ifndef DEFLATE
        vpxor %%x1, %%xtmp0
        vpxor %%x2, %%xtmp1
        vpxor %%x3, %%xtmp2
%endif
        test %%size, %%size
        jz %%done       ; if no bytes left, we're done

        add %%dst, 3*16
        add %%src, 3*16

        ; fall through to %%partial
%%partial:              ; 0 <= %%size < 16

%ifndef DEFLATE
        mov %%tmp, %%size
%endif

        LOAD_FRACTIONAL_XMM %%xtmp0, %%src, %%size

        vmovdqu [%%dst], %%xtmp0

%ifndef DEFLATE
        PARTIAL_FOLD %%x0, %%x1, %%x2, %%x3, %%xtmp0, %%tmp, %%xfold, \
                     %%xtmp1, %%xtmp2, %%xtmp3, %%xtmp4, %%dst
%endif

%%done:
%endm
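
; Flow summary: align the source, stream 64 bytes per iteration with FOLD4
; advancing the CRC state alongside the copy, drain up to three remaining
; full 16-byte registers with FOLD3/2/1, then finish the sub-16-byte tail
; with LOAD_FRACTIONAL_XMM plus PARTIAL_FOLD, so the copy and the CRC
; share a single pass over the input.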


;%assign K   1024;
;%assign D   8 * K;      ; Amount of history
;%assign LA  17 * 16;    ; Max look-ahead, rounded up to 32 byte boundary

; copy D + LA bytes from src to dst
; dst is aligned
;void copy_D_LA(uint8_t *dst, uint8_t *src);
; arg 1: rcx : dst
; arg 2: rdx : src
; copy_D_LA dst, src, tmp, ytmp0, ytmp1, ytmp2, ytmp3
%macro copy_D_LA 7
%define %%dst    %1  ; reg, clobbered
%define %%src    %2  ; reg, clobbered
%define %%tmp    %3
%define %%ytmp0  %4
%define %%ytmp1  %5
%define %%ytmp2  %6
%define %%ytmp3  %7

%define %%xtmp0  %4x
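; %4x leans on the caller's naming convention: for each ymm temporary name
; such as ytmp0, the caller is expected to also define ytmp0x as the
; matching xmm register, so appending "x" yields the 16-byte alias used
; for the 16-byte tail copy at the end of this macro.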

%assign %%SIZE (D + LA) / 32    ; number of 32-byte (ymm) blocks to be copied
%assign %%SIZE4 %%SIZE/4
%assign %%MOD16 ((D + LA) - 32 * %%SIZE) / 16
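; Worked sizes, assuming the commented-out D = 8*1024 and LA = 17*16 above
; (the includer must provide D and LA, since those %assigns are disabled):
; D + LA = 8192 + 272 = 8464 bytes, so %%SIZE = 264 (integer division),
; %%SIZE4 = 66, the main loop moves 66 * 128 = 8448 bytes, the %rep blocks
; expand to nothing (264 - 4*66 = 0), and %%MOD16 = (8464 - 8448)/16 = 1
; leaves a single 16-byte xmm tail copy.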

        lea %%tmp, [%%dst + 4 * 32 * %%SIZE4]
        jmp %%copy_D_LA_1
align 16
%%copy_D_LA_1:
        vmovdqu %%ytmp0, [%%src]
        vmovdqu %%ytmp1, [%%src + 1 * 32]
        vmovdqu %%ytmp2, [%%src + 2 * 32]
        vmovdqu %%ytmp3, [%%src + 3 * 32]
        vmovdqa [%%dst], %%ytmp0
        vmovdqa [%%dst + 1 * 32], %%ytmp1
        vmovdqa [%%dst + 2 * 32], %%ytmp2
        vmovdqa [%%dst + 3 * 32], %%ytmp3
        add %%src, 4*32
        add %%dst, 4*32
        cmp %%dst, %%tmp
        jne %%copy_D_LA_1
%assign %%i 0
%rep (%%SIZE - 4 * %%SIZE4)

%if (%%i == 0)
        vmovdqu %%ytmp0, [%%src + %%i*32]
%elif (%%i == 1)
        vmovdqu %%ytmp1, [%%src + %%i*32]
%elif (%%i == 2)
        vmovdqu %%ytmp2, [%%src + %%i*32]
%elif (%%i == 3)
        vmovdqu %%ytmp3, [%%src + %%i*32]
%else
%error too many i
% error
%endif

%assign %%i %%i+1
%endrep
%assign %%i 0
%rep (%%SIZE - 4 * %%SIZE4)

%if (%%i == 0)
        vmovdqa [%%dst + %%i*32], %%ytmp0
%elif (%%i == 1)
        vmovdqa [%%dst + %%i*32], %%ytmp1
%elif (%%i == 2)
        vmovdqa [%%dst + %%i*32], %%ytmp2
%elif (%%i == 3)
        vmovdqa [%%dst + %%i*32], %%ytmp3
%else
%error too many i
% error
%endif

%assign %%i %%i+1
%endrep

%rep %%MOD16
        vmovdqu %%xtmp0, [%%src + (%%SIZE - 4 * %%SIZE4)*32]
        vmovdqa [%%dst + (%%SIZE - 4 * %%SIZE4)*32], %%xtmp0
%endrep

%endm
%endif