;; Copyright (c) 2012-2018, Intel Corporation
;;
;; Redistribution and use in source and binary forms, with or without
;; modification, are permitted provided that the following conditions are met:
;;
;;     * Redistributions of source code must retain the above copyright notice,
;;       this list of conditions and the following disclaimer.
;;     * Redistributions in binary form must reproduce the above copyright
;;       notice, this list of conditions and the following disclaimer in the
;;       documentation and/or other materials provided with the distribution.
;;     * Neither the name of Intel Corporation nor the names of its contributors
;;       may be used to endorse or promote products derived from this software
;;       without specific prior written permission.
;;
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
; This code schedules 1 block at a time, with 2 lanes per block
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%define VMOVDQ vmovdqu ;; assume buffers not aligned

%define FUNC sha512_one_block_avx

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask
; Load xmm with mem and byte swap each qword
%macro COPY_XMM_AND_BSWAP 3
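        ; minimal body sketch, matching the description above: an unaligned
        ; load, then a byte swap of each qword via vpshufb with the mask in %3
        VMOVDQ  %1, %2
        vpshufb %1, %1, %3
%endmacro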
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%define BYTE_FLIP_MASK xmm12
%ifdef LINUX ; Linux (System V AMD64) register definitions
%define CTX     rsi ; 2nd arg
%define INP     rdi ; 1st arg

%define SRND    rdi ; clobbers INP
%else ; Windows register definitions
%define CTX     rdx ; 2nd arg
%define INP     rcx ; 1st arg

%define SRND    rcx ; clobbers INP
%endif
%define H0 0x6a09e667f3bcc908
%define H1 0xbb67ae8584caa73b
%define H2 0x3c6ef372fe94f82b
%define H3 0xa54ff53a5f1d36f1
%define H4 0x510e527fade682d1
%define H5 0x9b05688c2b3e6c1f
%define H6 0x1f83d9abfb41bd6b
%define H7 0x5be0cd19137e2179
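
;; (H0..H7 above are the standard SHA-512 initial digest words from
;; FIPS 180-4: the first 64 bits of the fractional parts of the square
;; roots of the first eight primes.)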
; Rotate values of symbols X0...X7

; Rotate values of symbols a...h
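
;; TWO_ROUNDS_AND_SCHED interleaves the vector message-schedule update
;; (producing two new W qwords per invocation) with two scalar SHA-512
;; rounds, so the AVX instruction latencies overlap the scalar
;; dependency chain.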
%macro TWO_ROUNDS_AND_SCHED 0

        vpalignr XTMP0, X5, X4, 8       ; XTMP0 = W[-7]
        ;; compute s0 two at a time and s1 two at a time
        ;; compute W[-16] + W[-7] 2 at a time
        MY_ROR  y0, (41-18)             ; y0 = e >> (41-18)
        vpaddq  XTMP0, XTMP0, X0        ; XTMP0 = W[-7] + W[-16]
        xor     y0, e                   ; y0 = e ^ (e >> (41-18))
        MY_ROR  y1, (39-34)             ; y1 = a >> (39-34)
        vpalignr XTMP1, X1, X0, 8       ; XTMP1 = W[-15]
        xor     y1, a                   ; y1 = a ^ (a >> (39-34))
        MY_ROR  y0, (18-14)             ; y0 = (e >> (18-14)) ^ (e >> (41-14))
        vpsllq  XTMP2, XTMP1, (64-1)    ; XTMP2 = W[-15] << (64-1)
        MY_ROR  y1, (34-28)             ; y1 = (a >> (34-28)) ^ (a >> (39-28))
        vpsrlq  XTMP3, XTMP1, 1         ; XTMP3 = W[-15] >> 1
        xor     y0, e                   ; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
        and     y2, e                   ; y2 = (f^g)&e
        MY_ROR  y0, 14                  ; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
        vpor    XTMP2, XTMP2, XTMP3     ; XTMP2 = W[-15] ror 1
        xor     y1, a                   ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
        xor     y2, g                   ; y2 = CH = ((f^g)&e)^g
        add     y2, y0                  ; y2 = S1 + CH
        vpsrlq  XTMP3, XTMP1, 8         ; XTMP3 = W[-15] >> 8
        add     y2, [rsp + _XFER + 0*8] ; y2 = k + w + S1 + CH
        MY_ROR  y1, 28                  ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
        vpsllq  X0, XTMP1, (64-8)       ; X0 = W[-15] << (64-8)
        add     h, y2                   ; h = h + S1 + CH + k + w
        add     d, h                    ; d = d + t1
        and     y0, b                   ; y0 = (a|c)&b
        vpsrlq  XTMP1, XTMP1, 7         ; XTMP1 = W[-15] >> 7
        add     h, y1                   ; h = t1 + S0
        or      y0, y2                  ; y0 = MAJ = ((a|c)&b)|(a&c)
        vpxor   XTMP1, XTMP1, XTMP2     ; XTMP1 = (W[-15] ror 1) ^ (W[-15] >> 7)
        add     h, y0                   ; h = t1 + S0 + MAJ
        vpxor   XTMP1, XTMP1, X0        ; XTMP1 = s0
        vpaddq  XTMP0, XTMP0, XTMP1     ; XTMP0 = W[-16] + W[-7] + s0
        MY_ROR  y0, (41-18)             ; y0 = e >> (41-18)
        vpsllq  XTMP3, X7, (64-19)      ; XTMP3 = W[-2] << (64-19)
        xor     y0, e                   ; y0 = e ^ (e >> (41-18))
        MY_ROR  y1, (39-34)             ; y1 = a >> (39-34)
        xor     y1, a                   ; y1 = a ^ (a >> (39-34))
        MY_ROR  y0, (18-14)             ; y0 = (e >> (18-14)) ^ (e >> (41-14))
        vpor    XTMP3, XTMP3, X0        ; XTMP3 = W[-2] ror 19
        MY_ROR  y1, (34-28)             ; y1 = (a >> (34-28)) ^ (a >> (39-28))
        vpsllq  XTMP2, X7, (64-61)      ; XTMP2 = W[-2] << (64-61)
        xor     y0, e                   ; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
        and     y2, e                   ; y2 = (f^g)&e
        MY_ROR  y0, 14                  ; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
        xor     y1, a                   ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
        xor     y2, g                   ; y2 = CH = ((f^g)&e)^g
        add     y2, y0                  ; y2 = S1 + CH
        vpor    XTMP2, XTMP2, XTMP1     ; XTMP2 = W[-2] ror 61
        add     y2, [rsp + _XFER + 1*8] ; y2 = k + w + S1 + CH
        MY_ROR  y1, 28                  ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
        vpsrlq  XTMP1, X7, 6            ; XTMP1 = W[-2] >> 6
        add     h, y2                   ; h = h + S1 + CH + k + w
        vpxor   XTMP1, XTMP1, XTMP2     ; XTMP1 = (W[-2] ror 61) ^ (W[-2] >> 6)
        add     d, h                    ; d = d + t1
        and     y0, b                   ; y0 = (a|c)&b
        vpxor   X0, XTMP3, XTMP1        ; X0 = s1
        add     h, y1                   ; h = t1 + S0
        or      y0, y2                  ; y0 = MAJ = ((a|c)&b)|(a&c)
        add     h, y0                   ; h = t1 + S0 + MAJ
        vpaddq  X0, X0, XTMP0           ; X0 = {W[1], W[0]}
;; input is [rsp + _XFER + %1 * 8]
        MY_ROR  y0, (41-18)             ; y0 = e >> (41-18)
        xor     y0, e                   ; y0 = e ^ (e >> (41-18))
        MY_ROR  y1, (39-34)             ; y1 = a >> (39-34)
        xor     y1, a                   ; y1 = a ^ (a >> (39-34))
        MY_ROR  y0, (18-14)             ; y0 = (e >> (18-14)) ^ (e >> (41-14))
        xor     y0, e                   ; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
        MY_ROR  y1, (34-28)             ; y1 = (a >> (34-28)) ^ (a >> (39-28))
        and     y2, e                   ; y2 = (f^g)&e
        xor     y1, a                   ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
        MY_ROR  y0, 14                  ; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
        xor     y2, g                   ; y2 = CH = ((f^g)&e)^g
        add     y2, y0                  ; y2 = S1 + CH
        MY_ROR  y1, 28                  ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
        add     y2, [rsp + _XFER + %1*8] ; y2 = k + w + S1 + CH
        add     h, y2                   ; h = h + S1 + CH + k + w
        add     d, h                    ; d = d + t1
        and     y0, b                   ; y0 = (a|c)&b
        add     h, y1                   ; h = t1 + S0
        or      y0, y2                  ; y0 = MAJ = ((a|c)&b)|(a&c)
        add     h, y0                   ; h = t1 + S0 + MAJ
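
;; Reference: each SHA-512 round computes
;;     T1 = h + S1(e) + CH(e,f,g) + K[t] + W[t]
;;     T2 = S0(a) + MAJ(a,b,c)
;;     d += T1 ; h = T1 + T2 (the a..h register aliases then rotate)
;; where
;;     S1(e)      = (e ror 14) ^ (e ror 18) ^ (e ror 41)
;;     S0(a)      = (a ror 28) ^ (a ror 34) ^ (a ror 39)
;;     CH(e,f,g)  = (e & f) ^ (~e & g)    = ((f ^ g) & e) ^ g
;;     MAJ(a,b,c) = (a&b) ^ (a&c) ^ (b&c) = ((a | c) & b) | (a & c)
;; The right-hand forms of CH and MAJ are the ones used by the code above.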
        dq 0x428a2f98d728ae22,0x7137449123ef65cd
        dq 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
        dq 0x3956c25bf348b538,0x59f111f1b605d019
        dq 0x923f82a4af194f9b,0xab1c5ed5da6d8118
        dq 0xd807aa98a3030242,0x12835b0145706fbe
        dq 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
        dq 0x72be5d74f27b896f,0x80deb1fe3b1696b1
        dq 0x9bdc06a725c71235,0xc19bf174cf692694
        dq 0xe49b69c19ef14ad2,0xefbe4786384f25e3
        dq 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
        dq 0x2de92c6f592b0275,0x4a7484aa6ea6e483
        dq 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
        dq 0x983e5152ee66dfab,0xa831c66d2db43210
        dq 0xb00327c898fb213f,0xbf597fc7beef0ee4
        dq 0xc6e00bf33da88fc2,0xd5a79147930aa725
        dq 0x06ca6351e003826f,0x142929670a0e6e70
        dq 0x27b70a8546d22ffc,0x2e1b21385c26c926
        dq 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
        dq 0x650a73548baf63de,0x766a0abb3c77b2a8
        dq 0x81c2c92e47edaee6,0x92722c851482353b
        dq 0xa2bfe8a14cf10364,0xa81a664bbc423001
        dq 0xc24b8b70d0f89791,0xc76c51a30654be30
        dq 0xd192e819d6ef5218,0xd69906245565a910
        dq 0xf40e35855771202a,0x106aa07032bbd1b8
        dq 0x19a4c116b8d2d0c8,0x1e376c085141ab53
        dq 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
        dq 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
        dq 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
        dq 0x748f82ee5defb2fc,0x78a5636f43172f60
        dq 0x84c87814a1f0ab72,0x8cc702081a6439ec
        dq 0x90befffa23631e28,0xa4506cebde82bde9
        dq 0xbef9a3f7b2c67915,0xc67178f2e372532b
        dq 0xca273eceea26619c,0xd186b8c721c0c207
        dq 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
        dq 0x06f067aa72176fba,0x0a637dc5a2c898a6
        dq 0x113f9804bef90dae,0x1b710b35131c471b
        dq 0x28db77f523047d84,0x32caab7b40c72493
        dq 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
        dq 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
        dq 0x5fcb6fab3ad6faec,0x6c44198c4a475817
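
;; (The 80 qwords above are the standard SHA-512 round constants K[0..79]
;; from FIPS 180-4: the first 64 bits of the fractional parts of the cube
;; roots of the first eighty primes.)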
PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x08090a0b0c0d0e0f0001020304050607
        dq 0x0001020304050607, 0x08090a0b0c0d0e0f
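;; vpshufb with this mask reverses the byte order within each 64-bit lane,
;; converting the big-endian message words to little-endian register order.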
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; void FUNC(void *input_data, UINT64 digest[8])
;; arg 1 : pointer to input data
;; arg 2 : pointer to digest
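;;
;; Note: processes exactly one 128-byte block (there is no length
;; argument); any message padding is the caller's responsibility.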
MKGLOBAL(FUNC,function,)
        vmovdqa [rsp + _XMM_SAVE + 0*16], xmm6
        vmovdqa [rsp + _XMM_SAVE + 1*16], xmm7
        vmovdqa [rsp + _XMM_SAVE + 2*16], xmm8
        vmovdqa [rsp + _XMM_SAVE + 3*16], xmm9
        vmovdqa [rsp + _XMM_SAVE + 4*16], xmm10
        vmovdqa [rsp + _XMM_SAVE + 5*16], xmm11
        vmovdqa [rsp + _XMM_SAVE + 6*16], xmm12
        vmovdqa [rsp + _XMM_SAVE + 7*16], xmm13
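        ;; (xmm6-xmm13 are callee-saved registers in the Windows x64 ABI,
        ;; which is why they are preserved here on Windows builds)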
        ;; load initial digest

        vmovdqa BYTE_FLIP_MASK, [rel PSHUFFLE_BYTE_FLIP_MASK]

        ;; byte swap first 16 qwords
        COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X4, [INP + 4*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X5, [INP + 5*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X6, [INP + 6*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X7, [INP + 7*16], BYTE_FLIP_MASK
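        ;; X0..X7 now hold the block's 16 message qwords W[0..15], two per
        ;; register, already converted to little-endian order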
        ;; schedule 64 input qwords, by doing 4 iterations of 16 rounds

        vpaddq  XFER, X0, [TBL + i*16]
        vmovdqa [rsp + _XFER], XFER

        vpaddq  XFER, X0, [TBL + 7*16]
        vmovdqa [rsp + _XFER], XFER
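        ;; (each vpaddq above pre-adds the two round constants to the two
        ;; scheduled words; the scalar rounds then read k + w directly from
        ;; [rsp + _XFER])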
        vpaddq  X0, X0, [TBL + 0*16]
        vmovdqa [rsp + _XFER], X0

        vpaddq  X1, X1, [TBL + 1*16]
        vmovdqa [rsp + _XFER], X1

        vpaddq  X2, X2, [TBL + 2*16]
        vmovdqa [rsp + _XFER], X2

        vpaddq  X3, X3, [TBL + 3*16]
        vmovdqa [rsp + _XFER], X3
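        ;; (final rounds: all W words have been scheduled by this point, so
        ;; only the constant additions and the scalar rounds remain)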
        vmovdqa xmm6,  [rsp + _XMM_SAVE + 0*16]
        vmovdqa xmm7,  [rsp + _XMM_SAVE + 1*16]
        vmovdqa xmm8,  [rsp + _XMM_SAVE + 2*16]
        vmovdqa xmm9,  [rsp + _XMM_SAVE + 3*16]
        vmovdqa xmm10, [rsp + _XMM_SAVE + 4*16]
        vmovdqa xmm11, [rsp + _XMM_SAVE + 5*16]
        vmovdqa xmm12, [rsp + _XMM_SAVE + 6*16]
        vmovdqa xmm13, [rsp + _XMM_SAVE + 7*16]
section .note.GNU-stack noalloc noexec nowrite progbits