1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
2 ; Copyright(c) 2011-2016 Intel Corporation All rights reserved.
4 ; Redistribution and use in source and binary forms, with or without
5 ; modification, are permitted provided that the following conditions
7 ; * Redistributions of source code must retain the above copyright
8 ; notice, this list of conditions and the following disclaimer.
9 ; * Redistributions in binary form must reproduce the above copyright
10 ; notice, this list of conditions and the following disclaimer in
11 ; the documentation and/or other materials provided with the
13 ; * Neither the name of Intel Corporation nor the names of its
14 ; contributors may be used to endorse or promote products derived
15 ; from this software without specific prior written permission.
17 ; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 ; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 ; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 ; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 ; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 ; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 ; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 ; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 ; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 ; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 ; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
30 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
32 ; uint64_t crc64_ecma_refl_by8(
33 ; uint64_t init_crc, //initial CRC value, 64 bits
34 ; const unsigned char *buf, //buffer pointer to calculate CRC on
35 ; uint64_t len //buffer length in bytes (64-bit data)
38 ; Reference paper titled "Fast CRC Computation for Generic Polynomials Using PCLMULQDQ Instruction"
39 ; sample yasm command line:
40 ; yasm -f elf64 -X gnu -g dwarf2 crc64_ecma_refl_by8
41 %include "reg_sizes.asm"
43 %define fetch_dist 1024
51 %ifidn __OUTPUT_FORMAT__, win64
62 %ifidn __OUTPUT_FORMAT__, win64
64 %define VARIABLE_OFFSET 16*10+8
66 %define VARIABLE_OFFSET 16*2+8
71 global crc64_ecma_refl_by8:function
73 ; uint64_t c = crc ^ 0xffffffff,ffffffffL;
75 sub rsp, VARIABLE_OFFSET
77 %ifidn __OUTPUT_FORMAT__, win64
78 ; push the xmm registers into the stack to maintain
79 movdqa [rsp + XMM_SAVE + 16*0], xmm6
80 movdqa [rsp + XMM_SAVE + 16*1], xmm7
81 movdqa [rsp + XMM_SAVE + 16*2], xmm8
82 movdqa [rsp + XMM_SAVE + 16*3], xmm9
83 movdqa [rsp + XMM_SAVE + 16*4], xmm10
84 movdqa [rsp + XMM_SAVE + 16*5], xmm11
85 movdqa [rsp + XMM_SAVE + 16*6], xmm12
86 movdqa [rsp + XMM_SAVE + 16*7], xmm13
89 ; check if smaller than 256B
92 ; for sizes less than 256, we can't fold 128B at a time...
96 ; load the initial crc value
97 movq xmm10, arg1 ; initial crc
98 ; receive the initial 128B data, xor the initial crc value
99 movdqu xmm0, [arg2+16*0]
100 movdqu xmm1, [arg2+16*1]
101 movdqu xmm2, [arg2+16*2]
102 movdqu xmm3, [arg2+16*3]
103 movdqu xmm4, [arg2+16*4]
104 movdqu xmm5, [arg2+16*5]
105 movdqu xmm6, [arg2+16*6]
106 movdqu xmm7, [arg2+16*7]
108 ; XOR the initial_crc value
110 movdqa xmm10, [rk3] ;xmm10 has rk3 and rk4
111 ;imm value of pclmulqdq instruction will determine which constant to use
112 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
113 ; we subtract 256 instead of 128 to save one instruction from the loop
116 ; at this section of the code, there is 128*x+y (0<=y<128) bytes of buffer. The _fold_128_B_loop
117 ; loop will fold 128B at a time until we have 128+y Bytes of buffer
120 ; fold 128B at a time. This section of the code folds 8 xmm registers in parallel
123 ; update the buffer pointer
126 prefetchnta [arg2+fetch_dist+0]
127 movdqu xmm9, [arg2+16*0]
128 movdqu xmm12, [arg2+16*1]
131 pclmulqdq xmm0, xmm10, 0x10
132 pclmulqdq xmm8, xmm10 , 0x1
133 pclmulqdq xmm1, xmm10, 0x10
134 pclmulqdq xmm13, xmm10 , 0x1
140 prefetchnta [arg2+fetch_dist+32]
141 movdqu xmm9, [arg2+16*2]
142 movdqu xmm12, [arg2+16*3]
145 pclmulqdq xmm2, xmm10, 0x10
146 pclmulqdq xmm8, xmm10 , 0x1
147 pclmulqdq xmm3, xmm10, 0x10
148 pclmulqdq xmm13, xmm10 , 0x1
154 prefetchnta [arg2+fetch_dist+64]
155 movdqu xmm9, [arg2+16*4]
156 movdqu xmm12, [arg2+16*5]
159 pclmulqdq xmm4, xmm10, 0x10
160 pclmulqdq xmm8, xmm10 , 0x1
161 pclmulqdq xmm5, xmm10, 0x10
162 pclmulqdq xmm13, xmm10 , 0x1
168 prefetchnta [arg2+fetch_dist+96]
169 movdqu xmm9, [arg2+16*6]
170 movdqu xmm12, [arg2+16*7]
173 pclmulqdq xmm6, xmm10, 0x10
174 pclmulqdq xmm8, xmm10 , 0x1
175 pclmulqdq xmm7, xmm10, 0x10
176 pclmulqdq xmm13, xmm10 , 0x1
184 ; check if there is another 128B in the buffer to be able to fold
186 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
189 ; at this point, the buffer pointer is pointing at the last y Bytes of the buffer, where 0 <= y < 128
190 ; the 128B of folded data is in 8 of the xmm registers: xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
193 ; fold the 8 xmm registers to 1 xmm register with different constants
197 pclmulqdq xmm0, xmm10, 0x1
198 pclmulqdq xmm8, xmm10, 0x10
204 pclmulqdq xmm1, xmm10, 0x1
205 pclmulqdq xmm8, xmm10, 0x10
211 pclmulqdq xmm2, xmm10, 0x1
212 pclmulqdq xmm8, xmm10, 0x10
218 pclmulqdq xmm3, xmm10, 0x1
219 pclmulqdq xmm8, xmm10, 0x10
225 pclmulqdq xmm4, xmm10, 0x1
226 pclmulqdq xmm8, xmm10, 0x10
232 pclmulqdq xmm5, xmm10, 0x1
233 pclmulqdq xmm8, xmm10, 0x10
239 pclmulqdq xmm6, xmm10, 0x1
240 pclmulqdq xmm8, xmm10, 0x10
245 ; instead of 128, we add 128-16 to the loop counter to save 1 instruction from the loop
246 ; instead of a cmp instruction, we use the negative flag with the jl instruction
248 jl _final_reduction_for_128
250 ; now we have 16+y bytes left to reduce. 16 Bytes is in register xmm7 and the rest is in memory
251 ; we can fold 16 bytes at a time if y>=16
252 ; continue folding 16B at a time
256 pclmulqdq xmm7, xmm10, 0x1
257 pclmulqdq xmm8, xmm10, 0x10
263 ; instead of a cmp instruction, we utilize the flags with the jge instruction
264 ; equivalent of: cmp arg3, 16-16
265 ; check if there is any more 16B in the buffer to be able to fold
266 jge _16B_reduction_loop
268 ;now we have 16+z bytes left to reduce, where 0<= z < 16.
269 ;first, we reduce the data in the xmm7 register
272 _final_reduction_for_128:
275 ; here we are getting data that is less than 16 bytes.
276 ; since we know that there was data before the pointer, we can offset the input pointer before the actual point, to receive exactly 16 bytes.
277 ; after that the registers need to be adjusted.
282 movdqu xmm1, [arg2 - 16 + arg3]
284 ; get rid of the extra data that was loaded before
285 ; load the shift constant
286 lea rax, [pshufb_shf_table]
295 pblendvb xmm2, xmm1 ;xmm0 is implicit
298 pclmulqdq xmm7, xmm10, 0x1
300 pclmulqdq xmm8, xmm10, 0x10
305 ; compute crc of a 128-bit value
310 pclmulqdq xmm7, xmm10, 0
319 pclmulqdq xmm7, xmm10, 0
321 pclmulqdq xmm7, xmm10, 0x10
328 ; return c ^ 0xffffffff, ffffffffL;
332 %ifidn __OUTPUT_FORMAT__, win64
333 movdqa xmm6, [rsp + XMM_SAVE + 16*0]
334 movdqa xmm7, [rsp + XMM_SAVE + 16*1]
335 movdqa xmm8, [rsp + XMM_SAVE + 16*2]
336 movdqa xmm9, [rsp + XMM_SAVE + 16*3]
337 movdqa xmm10, [rsp + XMM_SAVE + 16*4]
338 movdqa xmm11, [rsp + XMM_SAVE + 16*5]
339 movdqa xmm12, [rsp + XMM_SAVE + 16*6]
340 movdqa xmm13, [rsp + XMM_SAVE + 16*7]
342 add rsp, VARIABLE_OFFSET
345 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
346 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
347 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
348 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
353 ; check if there is enough buffer to be able to fold 16B at a time
357 ; if there is, load the constants
358 movdqa xmm10, [rk1] ; rk1 and rk2 in xmm10
360 movq xmm0, arg1 ; get the initial crc value
361 movdqu xmm7, [arg2] ; load the plaintext
364 ; update the buffer pointer
367 ; update the counter. subtract 32 instead of 16 to save one instruction from the loop
370 jmp _16B_reduction_loop
374 ; mov initial crc to the return value. this is necessary for zero-length buffers.
379 movq xmm0, arg1 ; get the initial crc value
383 jl _less_than_16_left
385 movdqu xmm7, [arg2] ; load the plaintext
386 pxor xmm7, xmm0 ; xor the initial crc value
389 movdqa xmm10, [rk1] ; rk1 and rk2 in xmm10
390 jmp _get_last_two_xmms
395 ; use stack space to load data less than 16 bytes, zero-out the 16B in memory first.
401 ; backup the counter value
444 pxor xmm7, xmm0 ; xor the initial crc value
446 lea rax,[pshufb_shf_table]
452 movdqu xmm0, [rax + r9]
457 ; Left shift (8-length) bytes in XMM
458 movdqu xmm0, [rax + r9 + 8]
466 pxor xmm7, xmm0 ; xor the initial crc value
472 ; precomputed constants
474 ; rk7 = floor(2^128/Q)
477 DQ 0xdabe95afc7875f40
479 DQ 0xe05dd497ca393ae4
481 DQ 0xd7d86b2af73de740
483 DQ 0x8757d71d4fcc1000
485 DQ 0xdabe95afc7875f40
487 DQ 0x0000000000000000
489 DQ 0x9c3e466c172963d5
491 DQ 0x92d8af2baf0e1e84
493 DQ 0x947874de595052cb
495 DQ 0x9e735cb59b4724da
497 DQ 0xe4ce2cd55fea0037
499 DQ 0x2fe3fd2920ce82ec
503 DQ 0x2e30203212cac325
507 DQ 0x6ae3efbb9dd441f3
509 DQ 0x69a35d91c3730254
511 DQ 0xb5ea1af9c013aca4
513 DQ 0x3be653a30fe1af51
515 DQ 0x60095b008a9efa44
519 ; use these values for shift constants for the pshufb instruction
520 ; different alignments result in values as shown:
521 ; dq 0x8887868584838281, 0x008f8e8d8c8b8a89 ; shl 15 (16-1) / shr1
522 ; dq 0x8988878685848382, 0x01008f8e8d8c8b8a ; shl 14 (16-3) / shr2
523 ; dq 0x8a89888786858483, 0x0201008f8e8d8c8b ; shl 13 (16-4) / shr3
524 ; dq 0x8b8a898887868584, 0x030201008f8e8d8c ; shl 12 (16-4) / shr4
525 ; dq 0x8c8b8a8988878685, 0x04030201008f8e8d ; shl 11 (16-5) / shr5
526 ; dq 0x8d8c8b8a89888786, 0x0504030201008f8e ; shl 10 (16-6) / shr6
527 ; dq 0x8e8d8c8b8a898887, 0x060504030201008f ; shl 9 (16-7) / shr7
528 ; dq 0x8f8e8d8c8b8a8988, 0x0706050403020100 ; shl 8 (16-8) / shr8
529 ; dq 0x008f8e8d8c8b8a89, 0x0807060504030201 ; shl 7 (16-9) / shr9
530 ; dq 0x01008f8e8d8c8b8a, 0x0908070605040302 ; shl 6 (16-10) / shr10
531 ; dq 0x0201008f8e8d8c8b, 0x0a09080706050403 ; shl 5 (16-11) / shr11
532 ; dq 0x030201008f8e8d8c, 0x0b0a090807060504 ; shl 4 (16-12) / shr12
533 ; dq 0x04030201008f8e8d, 0x0c0b0a0908070605 ; shl 3 (16-13) / shr13
534 ; dq 0x0504030201008f8e, 0x0d0c0b0a09080706 ; shl 2 (16-14) / shr14
535 ; dq 0x060504030201008f, 0x0e0d0c0b0a090807 ; shl 1 (16-15) / shr15
536 dq 0x8786858483828100, 0x8f8e8d8c8b8a8988
537 dq 0x0706050403020100, 0x000e0d0c0b0a0908
541 dq 0xFFFFFFFFFFFFFFFF, 0x0000000000000000
543 dq 0xFFFFFFFF00000000, 0xFFFFFFFFFFFFFFFF
545 dq 0x8080808080808080, 0x8080808080808080
547 ;;; func core, ver, snum
548 slversion crc64_ecma_refl_by8, 01, 00, 001d