;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2016 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; code to compute 16 SHA-1 digest lanes in parallel using AVX-512,
;; stitched with murmur3_x64_128
%include "reg_sizes.asm"

%ifdef HAVE_AS_KNOWS_AVX512
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%define VMOVPS  vmovdqu64
; SIMD variable definitions
%define SHUF_MASK       zmm13
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; 16 extra ZMM registers hold the byte-swapped (big-endian) input data
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; T = ROTL_5(A) + Ft(B,C,D) + E + Kt + Wt
; E=D, D=C, C=ROTL_30(B), B=A, A=T

; Ft(B,C,D) by round number t:
;  0-19         Ch(B,C,D)     = (B&C) ^ (~B&D)
; 20-39, 60-79  Parity(B,C,D) = B ^ C ^ D
; 40-59         Maj(B,C,D)    = (B&C) ^ (B&D) ^ (C&D)
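; With the operand order used below (dst = copy of B, src2 = C, src3 = D),
; each Ft maps to a single vpternlogd truth-table immediate: 0xCA for Ch,
; 0x96 for Parity (3-way XOR) and 0xE8 for Maj, which is the value
; %%F_IMMED is expected to carry for the given round range.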
        vmovdqa32       XTMP1, B                ; Copy B
        vpaddd          E, E, %%WT              ; E = E + Wt
        vpternlogd      XTMP1, C, D, %%F_IMMED  ; TMP1 = Ft(B,C,D)
        vpaddd          E, E, KT                ; E = E + Wt + Kt
        vprold          XTMP0, A, 5             ; TMP0 = ROTL_5(A)
        vpaddd          E, E, XTMP1             ; E = Ft(B,C,D) + E + Kt + Wt
        vprold          B, B, 30                ; B = ROTL_30(B)
        vpaddd          E, E, XTMP0             ; E = T
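; For reference, a minimal C sketch of the scalar round that the code above
; performs on all 16 segments at once, one dword lane per segment (rotl32
; and ft are illustrative helper names, not part of this file):
;
;       static inline uint32_t rotl32(uint32_t x, int n)
;       { return (x << n) | (x >> (32 - n)); }
;
;       uint32_t T = rotl32(a, 5) + ft(t, b, c, d) + e + k[t / 20] + w[t];
;       e = d; d = c; c = rotl32(b, 30); b = a; a = T;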
;; Insert murmur's instructions into this macro.
;; Every section_loop of mh_sha1 calls PROCESS_LOOP 80 times and
;; MSG_SCHED_ROUND_16_79 64 times, processing 1024 bytes.
;; murmur3_x64_128 consumes 16 bytes per block, so one murmur block is
;; stitched into each PROCESS_LOOP_MUR/MSG_SCHED_ROUND_16_79_MUR pair;
;; the 64 stitched rounds keep murmur3 in step with the same 1024 bytes
;; per section_loop.
%macro PROCESS_LOOP_MUR 2
; T = ROTL_5(A) + Ft(B,C,D) + E + Kt + Wt
; E=D, D=C, C=ROTL_30(B), B=A, A=T

; Ft(B,C,D) by round number t:
;  0-19         Ch(B,C,D)     = (B&C) ^ (~B&D)
; 20-39, 60-79  Parity(B,C,D) = B ^ C ^ D
; 40-59         Maj(B,C,D)    = (B&C) ^ (B&D) ^ (C&D)

        mov             mur_data1, [mur_in_p]           ; k1 = next 8 input bytes
        mov             mur_data2, [mur_in_p + 8]       ; k2 = following 8 bytes
        vmovdqa32       XTMP1, B                ; Copy B
        imul            mur_data1, mur_c1_r             ; k1 *= c1
        imul            mur_data2, mur_c2_r             ; k2 *= c2
        vpaddd          E, E, %%WT              ; E = E + Wt
        vpternlogd      XTMP1, C, D, %%F_IMMED  ; TMP1 = Ft(B,C,D)
        imul            mur_data1, mur_c2_r             ; k1 *= c2
        imul            mur_data2, mur_c1_r             ; k2 *= c1
        vpaddd          E, E, KT                ; E = E + Wt + Kt
        xor             mur_hash1, mur_data1            ; h1 ^= k1
        vprold          XTMP0, A, 5             ; TMP0 = ROTL_5(A)
        vpaddd          E, E, XTMP1             ; E = Ft(B,C,D) + E + Kt + Wt
        add             mur_hash1, mur_hash2            ; h1 += h2
        vprold          B, B, 30                ; B = ROTL_30(B)
        lea             mur_hash1, [mur_hash1 + mur_hash1*4 + N1] ; h1 = h1*5 + N1
        vpaddd          E, E, XTMP0             ; E = T
        xor             mur_hash2, mur_data2            ; h2 ^= k2
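; For reference, a minimal C sketch of the murmur3_x64_128 16-byte block
; step whose scalar halves are interleaved into PROCESS_LOOP_MUR and
; MSG_SCHED_ROUND_16_79_MUR (rotl64/load64 are illustrative helper names):
;
;       uint64_t k1 = load64(p), k2 = load64(p + 8);
;       k1 *= C1; k1 = rotl64(k1, 31); k1 *= C2; h1 ^= k1;
;       h1 = rotl64(h1, 27); h1 += h2; h1 = h1*5 + N1;
;       k2 *= C2; k2 = rotl64(k2, 33); k2 *= C1; h2 ^= k2;
;       h2 = rotl64(h2, 31); h2 += h1; h2 = h2*5 + N2;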
%macro MSG_SCHED_ROUND_16_79_MUR 4
; Wt = ROTL_1(Wt-3 ^ Wt-8 ^ Wt-14 ^ Wt-16)
; Wt+16 = ROTL_1(Wt+13 ^ Wt+8 ^ Wt+2 ^ Wt)
        vpternlogd      %%WT, %%WTp2, %%WTp8, 0x96      ; WT = WT ^ WTp2 ^ WTp8
        vpxord          %%WT, %%WT, %%WTp13             ; WT ^= WTp13
        add             mur_hash2, mur_hash1            ; h2 += h1
        lea             mur_hash2, [mur_hash2 + mur_hash2*4 + N2] ; h2 = h2*5 + N2
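; For reference, the schedule recurrence is computed in place over a ring
; of 16 W registers, so W(t mod 16) becomes W(t+16); in C terms:
;
;       w[t & 15] = rotl32(w[(t+13) & 15] ^ w[(t+8) & 15] ^
;                          w[(t+2) & 15] ^ w[t & 15], 1);
;
; the vpternlogd immediate 0x96 folds the first two XORs into a single
; instruction, and the final ROTL_1 follows the XOR chain.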
%define APPEND(a,b) a %+ b

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%ifidn __OUTPUT_FORMAT__, elf64
%define tmp3    r12     ; must be saved and restored
%define tmp4    r13     ; must be saved and restored
%define tmp5    r14     ; must be saved and restored
%define tmp6    r15     ; must be saved and restored
%define tmp7    rbx     ; must be saved and restored
%define tmp8    rbp     ; must be saved and restored

%macro FUNC_RESTORE 0
%define tmp1    r12     ; must be saved and restored
%define tmp2    r13     ; must be saved and restored
%define tmp3    r14     ; must be saved and restored
%define tmp4    r15     ; must be saved and restored
%define tmp5    rdi     ; must be saved and restored
%define tmp6    rsi     ; must be saved and restored
%define tmp7    rbx     ; must be saved and restored
%define tmp8    rbp     ; must be saved and restored

%define stack_size      10*16 + 9*8     ; must be an odd multiple of 8
%define arg(x)          [rsp + stack_size + PS + PS*x]
; unwind info macros are omitted; registers are saved/restored manually below
        movdqa  [rsp + 0*16], xmm6
        movdqa  [rsp + 1*16], xmm7
        movdqa  [rsp + 2*16], xmm8
        movdqa  [rsp + 3*16], xmm9
        movdqa  [rsp + 4*16], xmm10
        movdqa  [rsp + 5*16], xmm11
        movdqa  [rsp + 6*16], xmm12
        movdqa  [rsp + 7*16], xmm13
        movdqa  [rsp + 8*16], xmm14
        movdqa  [rsp + 9*16], xmm15
        mov     [rsp + 10*16 + 0*8], r12
        mov     [rsp + 10*16 + 1*8], r13
        mov     [rsp + 10*16 + 2*8], r14
        mov     [rsp + 10*16 + 3*8], r15
        mov     [rsp + 10*16 + 4*8], rdi
        mov     [rsp + 10*16 + 5*8], rsi
        mov     [rsp + 10*16 + 6*8], rbx
        mov     [rsp + 10*16 + 7*8], rbp
%macro FUNC_RESTORE 0
        movdqa  xmm6,  [rsp + 0*16]
        movdqa  xmm7,  [rsp + 1*16]
        movdqa  xmm8,  [rsp + 2*16]
        movdqa  xmm9,  [rsp + 3*16]
        movdqa  xmm10, [rsp + 4*16]
        movdqa  xmm11, [rsp + 5*16]
        movdqa  xmm12, [rsp + 6*16]
        movdqa  xmm13, [rsp + 7*16]
        movdqa  xmm14, [rsp + 8*16]
        movdqa  xmm15, [rsp + 9*16]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r13, [rsp + 10*16 + 1*8]
        mov     r14, [rsp + 10*16 + 2*8]
        mov     r15, [rsp + 10*16 + 3*8]
        mov     rdi, [rsp + 10*16 + 4*8]
        mov     rsi, [rsp + 10*16 + 5*8]
        mov     rbx, [rsp + 10*16 + 6*8]
        mov     rbp, [rsp + 10*16 + 7*8]
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; mh_sha1 variables
%define mh_digests_p    arg1
%define mh_data_p       arg2

; murmur3 variables
%define mur_in_p        tmp2
%define mur_digest_p    arg3
%define mur_hash1       tmp3
%define mur_hash2       tmp4
%define mur_data1       tmp5
%define mur_data2       return
%define mur_c1_r        tmp6
%define mur_c2_r        arg5

; murmur3_x64_128 constants
%define N1      0x52dce729      ; DWORD
%define N2      0x38495ab5      ; DWORD
%define C1      QWORD(0x87c37b91114253d5)
%define C2      QWORD(0x4cf5ad432745937f)
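; These come straight from the reference MurmurHash3_x64_128: C1/C2 are the
; block multipliers, and N1/N2 are the per-round addends in
;       h1 = rotl64(h1, 27) + h2;  h1 = h1*5 + 0x52dce729;
;       h2 = rotl64(h2, 31) + h1;  h2 = h2*5 + 0x38495ab5;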
; variables used while storing segs_digests on the stack
%define RSP_SAVE        tmp7
; segs_digests layout: low addr -> high addr
; a  | b  | c  | ...| p  | (16 segments)
; h0 | h0 | h0 | ...| h0 |    | Aa | Ab | Ac |...| Ap |
; h1 | h1 | h1 | ...| h1 |    | Ba | Bb | Bc |...| Bp |
; ...
; h4 | h4 | h4 | ...| h4 |    | Ea | Eb | Ec |...| Ep |
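; i.e. digest word w of segment s lives at mh_digests_p + 64*w + 4*s,
; matching the uint32_t digests[SHA1_DIGEST_WORDS][HASH_SEGS] layout below.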
;void mh_sha1_murmur3_x64_128_block_avx512 (const uint8_t * input_data,
;               uint32_t mh_sha1_digests[SHA1_DIGEST_WORDS][HASH_SEGS],
;               uint8_t frame_buffer[MH_SHA1_BLOCK_SIZE],
;               uint32_t murmur3_x64_128_digests[MURMUR3_x64_128_DIGEST_WORDS],
;               uint32_t num_blocks);
; arg 0 pointer to input data
; arg 1 pointer to digests, including the segment digests (uint32_t digests[5][16])
; arg 2 pointer to aligned_frame_buffer, used to hold the big-endian data
; arg 3 pointer to murmur3 digest
; arg 4 number of 1KB blocks
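; Illustrative only: a hedged C sketch of driving this block function. The
; real callers are the mh_sha1_murmur3 C wrappers; the buffer sizes here
; assume HASH_SEGS=16, SHA1_DIGEST_WORDS=5, MH_SHA1_BLOCK_SIZE=1024 and a
; 4-word (128-bit) murmur digest, with tail/finalization handled elsewhere:
;
;       uint32_t digests[5][16];        /* segment digests       */
;       uint8_t  frame[1024];           /* aligned frame buffer  */
;       uint32_t mur[4];                /* murmur3 running hash  */
;       /* process n whole 1KB blocks starting at buf */
;       mh_sha1_murmur3_x64_128_block_avx512(buf, digests, frame, mur, n);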
global mh_sha1_murmur3_x64_128_block_avx512
func(mh_sha1_murmur3_x64_128_block_avx512)
        ; align rsp to the 64-byte boundary required by AVX-512 loads/stores
        ; copy segs_digests into registers
        VMOVPS  HH0, [mh_digests_p + 64*0]
        VMOVPS  HH1, [mh_digests_p + 64*1]
        VMOVPS  HH2, [mh_digests_p + 64*2]
        VMOVPS  HH3, [mh_digests_p + 64*3]
        VMOVPS  HH4, [mh_digests_p + 64*4]
        ; mask used to byte-swap the input to big-endian
        vmovdqa64 SHUF_MASK, [PSHUFFLE_BYTE_FLIP_MASK]

        ; init murmur variables
        mov     mur_in_p, mh_in_p ; murmur keeps its own input pointer, since it strides through the data differently than mh_sha1
        ; load murmur hash digests and multipliers
        mov     mur_hash1, [mur_digest_p]
        mov     mur_hash2, [mur_digest_p + 8]

        ; byte-swap the input to big-endian, keeping it in 16 extra ZMM
        ; registers instead of storing it on the stack
        VMOVPS  APPEND(W,I), [mh_in_p + I*64 + 0*64]
        VMOVPS  APPEND(W,J), [mh_in_p + I*64 + 1*64]
        vpshufb APPEND(W,I), APPEND(W,I), SHUF_MASK
        vpshufb APPEND(W,J), APPEND(W,J), SHUF_MASK
        vmovdqa32       KT, [K00_19]
%if N < 64 ; stitching 64 times
        PROCESS_LOOP_MUR        APPEND(W,J), I
        MSG_SCHED_ROUND_16_79_MUR APPEND(W,J), APPEND(W,K), APPEND(W,L), APPEND(W,M)
%else ; 64 <= N < 80, without stitching
        PROCESS_LOOP    APPEND(W,J), I
        vmovdqa32       KT, [K20_39]
        vmovdqa32       KT, [K40_59]
        vmovdqa32       KT, [K60_79]
        PREFETCH_X [mh_in_p + 1024 + 128*(N / 20)]
        PREFETCH_X [mh_in_p + 1024 + 128*(N / 20 + 1)]
%assign J ((J+1) % 16)
%assign K ((K+1) % 16)
%assign L ((L+1) % 16)
%assign M ((M+1) % 16)
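; J, K, L and M advance modulo 16: the message schedule lives in a ring of
; 16 W registers, so the register holding W(t) is rewritten with W(t+16).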
        ; store murmur3 hash digest
        mov     [mur_digest_p], mur_hash1
        mov     [mur_digest_p + 8], mur_hash2

        ; copy segs_digests back to mh_digests_p
        VMOVPS  [mh_digests_p + 64*0], HH0
        VMOVPS  [mh_digests_p + 64*1], HH1
        VMOVPS  [mh_digests_p + 64*2], HH2
        VMOVPS  [mh_digests_p + 64*3], HH3
        VMOVPS  [mh_digests_p + 64*4], HH4

        mov     rsp, RSP_SAVE   ; restore rsp
section .data align=64

PSHUFFLE_BYTE_FLIP_MASK: dq 0x0405060700010203
                         dq 0x0c0d0e0f08090a0b
                         dq 0x0405060700010203
                         dq 0x0c0d0e0f08090a0b
                         dq 0x0405060700010203
                         dq 0x0c0d0e0f08090a0b
                         dq 0x0405060700010203
                         dq 0x0c0d0e0f08090a0b
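; the mask's byte indices (3,2,1,0, 7,6,5,4, ...) make vpshufb reverse the
; bytes within every 32-bit lane: a dword-wise endian swap across the ZMM.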
K00_19: dq 0x5A8279995A827999
        dq 0x5A8279995A827999
        dq 0x5A8279995A827999
        dq 0x5A8279995A827999
        dq 0x5A8279995A827999
        dq 0x5A8279995A827999
        dq 0x5A8279995A827999
        dq 0x5A8279995A827999

K20_39: dq 0x6ED9EBA16ED9EBA1
        dq 0x6ED9EBA16ED9EBA1
        dq 0x6ED9EBA16ED9EBA1
        dq 0x6ED9EBA16ED9EBA1
        dq 0x6ED9EBA16ED9EBA1
        dq 0x6ED9EBA16ED9EBA1
        dq 0x6ED9EBA16ED9EBA1
        dq 0x6ED9EBA16ED9EBA1

K40_59: dq 0x8F1BBCDC8F1BBCDC
        dq 0x8F1BBCDC8F1BBCDC
        dq 0x8F1BBCDC8F1BBCDC
        dq 0x8F1BBCDC8F1BBCDC
        dq 0x8F1BBCDC8F1BBCDC
        dq 0x8F1BBCDC8F1BBCDC
        dq 0x8F1BBCDC8F1BBCDC
        dq 0x8F1BBCDC8F1BBCDC

K60_79: dq 0xCA62C1D6CA62C1D6
        dq 0xCA62C1D6CA62C1D6
        dq 0xCA62C1D6CA62C1D6
        dq 0xCA62C1D6CA62C1D6
        dq 0xCA62C1D6CA62C1D6
        dq 0xCA62C1D6CA62C1D6
        dq 0xCA62C1D6CA62C1D6
        dq 0xCA62C1D6CA62C1D6
%else ; HAVE_AS_KNOWS_AVX512
%ifidn __OUTPUT_FORMAT__, win64
global no_sha1_murmur3_x64_128_block_avx512
no_sha1_murmur3_x64_128_block_avx512:
%endif
%endif ; HAVE_AS_KNOWS_AVX512