1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
2 ; Copyright(c) 2011-2016 Intel Corporation All rights reserved.
4 ; Redistribution and use in source and binary forms, with or without
5 ; modification, are permitted provided that the following conditions
7 ; * Redistributions of source code must retain the above copyright
8 ; notice, this list of conditions and the following disclaimer.
9 ; * Redistributions in binary form must reproduce the above copyright
10 ; notice, this list of conditions and the following disclaimer in
11 ; the documentation and/or other materials provided with the
13 ; * Neither the name of Intel Corporation nor the names of its
14 ; contributors may be used to endorse or promote products derived
15 ; from this software without specific prior written permission.
17 ; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 ; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 ; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 ; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 ; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 ; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 ; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 ; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 ; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 ; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 ; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
30 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
32 ; uint64_t crc64_ecma_refl_by8(
33 ; uint64_t init_crc, //initial CRC value, 64 bits
34 ; const unsigned char *buf, //buffer pointer to calculate CRC on
35 ; uint64_t len //buffer length in bytes (64-bit data)
38 ; Reference paper titled "Fast CRC Computation for Generic Polynomials Using PCLMULQDQ Instruction"
39 ; sample yasm command line:
40 ; yasm -f elf64 -X gnu -g dwarf2 crc64_ecma_refl_by8.asm
;-----------------------------------------------------------------------
; NOTE(review): every line of this file carries a leading decimal
; listing number, and the numbering jumps (41 -> 43 -> 51 -> 62 ...).
; The %else/%endif pairs for the conditionals below, and the
; arg1/arg2/arg3 register aliases the code relies on, fall in the
; elided lines — confirm against the full source before assembling.
;-----------------------------------------------------------------------
41 %include "reg_sizes.asm"
; how far (bytes) ahead of the current buffer pointer the 128B fold
; loop issues prefetchnta
43 %define fetch_dist 1024
; first win64 test presumably selects the argument-register aliases
; (rcx/rdx/r8 vs rdi/rsi/rdx) — the alias lines themselves are elided
51 %ifidn __OUTPUT_FORMAT__, win64
; stack frame size: win64 reserves 16*10 bytes of xmm spill space
; (only 8 saves of xmm6-xmm13 are visible below; the extra 2 slots are
; unaccounted for in this extract — TODO confirm) plus 8 bytes so that
; rsp stays 16-byte aligned after the call-pushed return address
62 %ifidn __OUTPUT_FORMAT__, win64
64 %define VARIABLE_OFFSET 16*10+8
; non-Windows ABIs: no callee-saved xmm registers, so only a small frame
66 %define VARIABLE_OFFSET 16*2+8
;-----------------------------------------------------------------------
; uint64_t crc64_ecma_refl_by8(uint64_t init_crc,
;                              const unsigned char *buf,
;                              uint64_t len)
; Bit-reflected CRC64-ECMA computed by folding 128 bytes per iteration
; with PCLMULQDQ (per the Intel "Fast CRC Computation ... PCLMULQDQ"
; white paper cited in the header).
; NOTE(review): listing numbers skip here (71 -> 74 -> 76), so the
; instructions that perform the initial complement of arg1 are not
; visible in this extract.
;-----------------------------------------------------------------------
71 mk_global crc64_ecma_refl_by8, function
; reflected CRC convention: start from the bitwise complement of the
; caller-supplied seed
74 ; uint64_t c = crc ^ 0xFFFFFFFFFFFFFFFFL;
76 sub rsp, VARIABLE_OFFSET
78 %ifidn __OUTPUT_FORMAT__, win64
79 ; push the xmm registers into the stack to maintain
; (xmm6-xmm15 are callee-saved under the Microsoft x64 ABI; the eight
; registers clobbered by the fold loop are spilled to the frame here
; and restored in the epilogue)
80 movdqa [rsp + XMM_SAVE + 16*0], xmm6
81 movdqa [rsp + XMM_SAVE + 16*1], xmm7
82 movdqa [rsp + XMM_SAVE + 16*2], xmm8
83 movdqa [rsp + XMM_SAVE + 16*3], xmm9
84 movdqa [rsp + XMM_SAVE + 16*4], xmm10
85 movdqa [rsp + XMM_SAVE + 16*5], xmm11
86 movdqa [rsp + XMM_SAVE + 16*6], xmm12
87 movdqa [rsp + XMM_SAVE + 16*7], xmm13
90 ; check if smaller than 256B
; (the cmp/jl pair that branches to the small-buffer path is elided here)
93 ; for sizes less than 256, we can't fold 128B at a time...
97 ; load the initial crc value
98 movq xmm10, arg1 ; initial crc
99 ; receive the initial 128B data, xor the initial crc value
; unaligned loads: the caller's buffer has no alignment guarantee
100 movdqu xmm0, [arg2+16*0]
101 movdqu xmm1, [arg2+16*1]
102 movdqu xmm2, [arg2+16*2]
103 movdqu xmm3, [arg2+16*3]
104 movdqu xmm4, [arg2+16*4]
105 movdqu xmm5, [arg2+16*5]
106 movdqu xmm6, [arg2+16*6]
107 movdqu xmm7, [arg2+16*7]
109 ; XOR the initial_crc value
; (the xor of xmm10 into xmm0 is on an elided line — in reflected CRC
; the seed folds into the low lane of the first data block)
111 movdqa xmm10, [rk3] ;xmm10 has rk3 and rk4
112 ;imm value of pclmulqdq instruction will determine which constant to use
113 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
114 ; we subtract 256 instead of 128 to save one instruction from the loop
117 ; at this section of the code, there is 128*x+y (0<=y<128) bytes of buffer. The _fold_128_B_loop
118 ; loop will fold 128B at a time until we have 128+y Bytes of buffer
; (the _fold_128_B_loop label itself falls on an elided line)
121 ; fold 128B at a time. This section of the code folds 8 xmm registers in parallel
124 ; update the buffer pointer
; pattern repeated four times below: prefetch the next cache lines,
; load 2 fresh 16B blocks, then fold the 2 corresponding accumulators
; through the rk3/rk4 constants (imm 0x10 = high*low, imm 0x1 = low*high;
; the movdqa copies into xmm8/xmm13 and the xor/merge steps are elided)
127 prefetchnta [arg2+fetch_dist+0]
128 movdqu xmm9, [arg2+16*0]
129 movdqu xmm12, [arg2+16*1]
132 pclmulqdq xmm0, xmm10, 0x10
133 pclmulqdq xmm8, xmm10 , 0x1
134 pclmulqdq xmm1, xmm10, 0x10
135 pclmulqdq xmm13, xmm10 , 0x1
141 prefetchnta [arg2+fetch_dist+32]
142 movdqu xmm9, [arg2+16*2]
143 movdqu xmm12, [arg2+16*3]
146 pclmulqdq xmm2, xmm10, 0x10
147 pclmulqdq xmm8, xmm10 , 0x1
148 pclmulqdq xmm3, xmm10, 0x10
149 pclmulqdq xmm13, xmm10 , 0x1
155 prefetchnta [arg2+fetch_dist+64]
156 movdqu xmm9, [arg2+16*4]
157 movdqu xmm12, [arg2+16*5]
160 pclmulqdq xmm4, xmm10, 0x10
161 pclmulqdq xmm8, xmm10 , 0x1
162 pclmulqdq xmm5, xmm10, 0x10
163 pclmulqdq xmm13, xmm10 , 0x1
169 prefetchnta [arg2+fetch_dist+96]
170 movdqu xmm9, [arg2+16*6]
171 movdqu xmm12, [arg2+16*7]
174 pclmulqdq xmm6, xmm10, 0x10
175 pclmulqdq xmm8, xmm10 , 0x1
176 pclmulqdq xmm7, xmm10, 0x10
177 pclmulqdq xmm13, xmm10 , 0x1
185 ; check if there is another 128B in the buffer to be able to fold
; (pointer advance, counter subtract and the jge back to the 128B loop
; are on elided lines)
187 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
190 ; at this point, the buffer pointer is pointing at the last y Bytes of the buffer, where 0 <= y < 128
191 ; the 128B of folded data is in 8 of the xmm registers: xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
194 ; fold the 8 xmm registers to 1 xmm register with different constants
; each step below folds one accumulator into the running xmm7 result:
; a movdqa of the accumulator into xmm8, the two carry-less multiplies
; visible here, then xor of both products into xmm7 (the movdqa/xor and
; per-step rk-constant loads fall on elided lines)
198 pclmulqdq xmm0, xmm10, 0x1
199 pclmulqdq xmm8, xmm10, 0x10
205 pclmulqdq xmm1, xmm10, 0x1
206 pclmulqdq xmm8, xmm10, 0x10
212 pclmulqdq xmm2, xmm10, 0x1
213 pclmulqdq xmm8, xmm10, 0x10
219 pclmulqdq xmm3, xmm10, 0x1
220 pclmulqdq xmm8, xmm10, 0x10
226 pclmulqdq xmm4, xmm10, 0x1
227 pclmulqdq xmm8, xmm10, 0x10
233 pclmulqdq xmm5, xmm10, 0x1
234 pclmulqdq xmm8, xmm10, 0x10
240 pclmulqdq xmm6, xmm10, 0x1
241 pclmulqdq xmm8, xmm10, 0x10
246 ; instead of 128, we add 128-16 to the loop counter to save 1 instruction from the loop
247 ; instead of a cmp instruction, we use the negative flag with the jl instruction
249 jl _final_reduction_for_128
251 ; now we have 16+y bytes left to reduce. 16 Bytes is in register xmm7 and the rest is in memory
252 ; we can fold 16 bytes at a time if y>=16
253 ; continue folding 16B at a time
; (_16B_reduction_loop label is elided; this is the loop body that folds
; xmm7 with one fresh 16B block per iteration via rk1/rk2)
257 pclmulqdq xmm7, xmm10, 0x1
258 pclmulqdq xmm8, xmm10, 0x10
264 ; instead of a cmp instruction, we utilize the flags with the jge instruction
265 ; equivalent of: cmp arg3, 16-16
266 ; check if there is any more 16B in the buffer to be able to fold
267 jge _16B_reduction_loop
269 ;now we have 16+z bytes left to reduce, where 0<= z < 16.
270 ;first, we reduce the data in the xmm7 register
273 _final_reduction_for_128:
; _get_last_two_xmms path (label elided): merge the folded 16B in xmm7
; with the final partial block taken from the end of the buffer
276 ; here we are getting data that is less than 16 bytes.
277 ; since we know that there was data before the pointer, we can offset the input pointer before the actual point, to receive exactly 16 bytes.
278 ; after that the registers need to be adjusted.
; load the LAST 16 bytes of the buffer (deliberately overlapping data
; that was already folded); the overlap is discarded via pshufb below
283 movdqu xmm1, [arg2 - 16 + arg3]
285 ; get rid of the extra data that was loaded before
286 ; load the shift constant
287 lea rax, [pshufb_shf_table]
; select bytes of the old xmm7 vs the freshly loaded tail under the
; mask left in xmm0 by the elided pshufb/mask setup
296 pblendvb xmm2, xmm1 ;xmm0 is implicit
; fold the merged 32 bytes down to 16
299 pclmulqdq xmm7, xmm10, 0x1
301 pclmulqdq xmm8, xmm10, 0x10
306 ; compute crc of a 128-bit value
; final two-stage reduction: fold 128 -> 64 bits, then Barrett-reduce
; 64 bits to the final CRC (rk5..rk8 constant loads are elided)
311 pclmulqdq xmm7, xmm10, 0
; barrett reduction: multiply by floor(2^128/Q), then by Q, and xor
320 pclmulqdq xmm7, xmm10, 0
322 pclmulqdq xmm7, xmm10, 0x10
; final complement to match the reflected-CRC convention (the pextrq/not
; that materialize the return value in rax are on elided lines)
329 ; return c ^ 0xFFFFFFFFFFFFFFFFL;
333 %ifidn __OUTPUT_FORMAT__, win64
; restore the Microsoft-x64 callee-saved xmm registers spilled in the
; prologue (same slots, reverse direction)
334 movdqa xmm6, [rsp + XMM_SAVE + 16*0]
335 movdqa xmm7, [rsp + XMM_SAVE + 16*1]
336 movdqa xmm8, [rsp + XMM_SAVE + 16*2]
337 movdqa xmm9, [rsp + XMM_SAVE + 16*3]
338 movdqa xmm10, [rsp + XMM_SAVE + 16*4]
339 movdqa xmm11, [rsp + XMM_SAVE + 16*5]
340 movdqa xmm12, [rsp + XMM_SAVE + 16*6]
341 movdqa xmm13, [rsp + XMM_SAVE + 16*7]
; tear down the frame allocated by "sub rsp, VARIABLE_OFFSET" (the ret
; is on an elided line)
343 add rsp, VARIABLE_OFFSET
346 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
347 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
348 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
349 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; _less_than_256 path (label elided): buffers of 32..255 bytes are
; reduced 16B at a time without the 128B fold
354 ; check if there is enough buffer to be able to fold 16B at a time
358 ; if there is, load the constants
359 movdqa xmm10, [rk1] ; rk1 and rk2 in xmm10
361 movq xmm0, arg1 ; get the initial crc value
362 movdqu xmm7, [arg2] ; load the plaintext
; (the pxor that mixes xmm0 into xmm7 is on an elided line)
365 ; update the buffer pointer
368 ; update the counter. subtract 32 instead of 16 to save one instruction from the loop
371 jmp _16B_reduction_loop
; _less_than_32 path (label elided): 16..31 byte buffers
375 ; mov initial crc to the return value. this is necessary for zero-length buffers.
380 movq xmm0, arg1 ; get the initial crc value
384 jl _less_than_16_left
386 movdqu xmm7, [arg2] ; load the plaintext
387 pxor xmm7, xmm0 ; xor the initial crc value
390 movdqa xmm10, [rk1] ; rk1 and rk2 in xmm10
391 jmp _get_last_two_xmms
; _less_than_16_left path (label elided): assemble a zero-padded 16B
; block on the stack, then shift it into position with pshufb
396 ; use stack space to load data less than 16 bytes, zero-out the 16B in memory first.
402 ; backup the counter value
; (the byte-copy loop that moves arg3 bytes onto the stack occupies the
; large elided range 403..444)
445 pxor xmm7, xmm0 ; xor the initial crc value
447 lea rax,[pshufb_shf_table]
; pick the shuffle mask matching the number of valid bytes (r9)
453 movdqu xmm0, [rax + r9]
458 ; Left shift (8-length) bytes in XMM
459 movdqu xmm0, [rax + r9 + 8]
467 pxor xmm7, xmm0 ; xor the initial crc value
473 ; precomputed constants
; NOTE(review): the rk1..rk18 labels that normally precede each DQ below
; fall on elided listing lines; the values are the CRC64-ECMA (reflected)
; fold/reduction constants — confirm labels against the full source.
475 ; rk7 = floor(2^128/Q)
478 DQ 0xdabe95afc7875f40
480 DQ 0xe05dd497ca393ae4
482 DQ 0xd7d86b2af73de740
484 DQ 0x8757d71d4fcc1000
486 DQ 0xdabe95afc7875f40
488 DQ 0x0000000000000000
490 DQ 0x9c3e466c172963d5
492 DQ 0x92d8af2baf0e1e84
494 DQ 0x947874de595052cb
496 DQ 0x9e735cb59b4724da
498 DQ 0xe4ce2cd55fea0037
500 DQ 0x2fe3fd2920ce82ec
504 DQ 0x2e30203212cac325
508 DQ 0x6ae3efbb9dd441f3
510 DQ 0x69a35d91c3730254
512 DQ 0xb5ea1af9c013aca4
514 DQ 0x3be653a30fe1af51
516 DQ 0x60095b008a9efa44
; pshufb_shf_table: masks indexed by tail length for shifting/selecting
; bytes of an xmm register in the partial-block paths above
520 ; use these values for shift constants for the pshufb instruction
521 ; different alignments result in values as shown:
522 ; dq 0x8887868584838281, 0x008f8e8d8c8b8a89 ; shl 15 (16-1) / shr1
523 ; dq 0x8988878685848382, 0x01008f8e8d8c8b8a ; shl 14 (16-2) / shr2
524 ; dq 0x8a89888786858483, 0x0201008f8e8d8c8b ; shl 13 (16-3) / shr3
525 ; dq 0x8b8a898887868584, 0x030201008f8e8d8c ; shl 12 (16-4) / shr4
526 ; dq 0x8c8b8a8988878685, 0x04030201008f8e8d ; shl 11 (16-5) / shr5
527 ; dq 0x8d8c8b8a89888786, 0x0504030201008f8e ; shl 10 (16-6) / shr6
528 ; dq 0x8e8d8c8b8a898887, 0x060504030201008f ; shl 9 (16-7) / shr7
529 ; dq 0x8f8e8d8c8b8a8988, 0x0706050403020100 ; shl 8 (16-8) / shr8
530 ; dq 0x008f8e8d8c8b8a89, 0x0807060504030201 ; shl 7 (16-9) / shr9
531 ; dq 0x01008f8e8d8c8b8a, 0x0908070605040302 ; shl 6 (16-10) / shr10
532 ; dq 0x0201008f8e8d8c8b, 0x0a09080706050403 ; shl 5 (16-11) / shr11
533 ; dq 0x030201008f8e8d8c, 0x0b0a090807060504 ; shl 4 (16-12) / shr12
534 ; dq 0x04030201008f8e8d, 0x0c0b0a0908070605 ; shl 3 (16-13) / shr13
535 ; dq 0x0504030201008f8e, 0x0d0c0b0a09080706 ; shl 2 (16-14) / shr14
536 ; dq 0x060504030201008f, 0x0e0d0c0b0a090807 ; shl 1 (16-15) / shr15
537 dq 0x8786858483828100, 0x8f8e8d8c8b8a8988
538 dq 0x0706050403020100, 0x000e0d0c0b0a0908
; mask constants used by the final-reduction/partial-block code
; (labels mask1/mask2/mask3 presumably on elided lines — TODO confirm)
542 dq 0xFFFFFFFFFFFFFFFF, 0x0000000000000000
544 dq 0xFFFFFFFF00000000, 0xFFFFFFFFFFFFFFFF
546 dq 0x8080808080808080, 0x8080808080808080
548 ;;; func core, ver, snum
549 slversion crc64_ecma_refl_by8, 01, 00, 001d