;;
;; Copyright (c) 2012-2018, Intel Corporation
;;
;; Redistribution and use in source and binary forms, with or without
;; modification, are permitted provided that the following conditions are met:
;;
;;     * Redistributions of source code must retain the above copyright notice,
;;       this list of conditions and the following disclaimer.
;;     * Redistributions in binary form must reproduce the above copyright
;;       notice, this list of conditions and the following disclaimer in the
;;       documentation and/or other materials provided with the distribution.
;;     * Neither the name of Intel Corporation nor the names of its contributors
;;       may be used to endorse or promote products derived from this software
;;       without specific prior written permission.
;;
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;

; This code schedules 1 block at a time
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%include "os.asm"

%define VMOVDQ vmovdqu ;; assume buffers not aligned

%ifndef FUNC
%define FUNC sha512_one_block_avx
%endif

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros

%macro MY_ROR 2
        shld %1,%1,(64-(%2))
%endm
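
;; MY_ROR implements a 64-bit rotate right by %2: SHLD with both operands
;; set to the same register rotates it left by (64-%2), which is the same
;; as rotating it right by %2.
;; Illustrative expansion (not part of the original source):
;;   MY_ROR rax, 14   ->   shld rax,rax,(64-14)   ; rax rotated right by 14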

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask
; Load xmm with mem and byte swap each qword
%macro COPY_XMM_AND_BSWAP 3
        VMOVDQ %1, %2
        vpshufb %1, %3
%endmacro
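
;; Note: with BYTE_FLIP_MASK loaded from PSHUFFLE_BYTE_FLIP_MASK below
;; ({0x0001020304050607, 0x08090a0b0c0d0e0f}), the vpshufb reverses the
;; byte order within each 64-bit lane, converting the two big-endian
;; message qwords of the 16-byte load into the little-endian form used
;; by the round computations.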

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%define X0 xmm4
%define X1 xmm5
%define X2 xmm6
%define X3 xmm7
%define X4 xmm8
%define X5 xmm9
%define X6 xmm10
%define X7 xmm11

%define XTMP0 xmm0
%define XTMP1 xmm1
%define XTMP2 xmm2
%define XTMP3 xmm3
%define XFER  xmm13

%define BYTE_FLIP_MASK xmm12

%ifdef LINUX
%define CTX     rsi     ; 2nd arg
%define INP     rdi     ; 1st arg

%define SRND    rdi     ; clobbers INP
%define c       rcx
%define d       r8
%define e       rdx
%else
%define CTX     rdx     ; 2nd arg
%define INP     rcx     ; 1st arg

%define SRND    rcx     ; clobbers INP
%define c       rdi
%define d       rsi
%define e       r8

%endif
%define TBL     rbp
%define a       rax
%define b       rbx

%define f       r9
%define g       r10
%define h       r11

%define y0      r13
%define y1      r14
%define y2      r15

%ifndef H0
%define H0 0x6a09e667f3bcc908
%define H1 0xbb67ae8584caa73b
%define H2 0x3c6ef372fe94f82b
%define H3 0xa54ff53a5f1d36f1
%define H4 0x510e527fade682d1
%define H5 0x9b05688c2b3e6c1f
%define H6 0x1f83d9abfb41bd6b
%define H7 0x5be0cd19137e2179
%endif

struc STACK
%ifndef LINUX
_XMM_SAVE:      reso 8
%endif
_XFER:          reso 1
endstruc
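
;; Stack layout: on non-LINUX (Windows) builds _XMM_SAVE reserves 8 x 16
;; bytes to preserve the callee-saved registers xmm6-xmm13; _XFER holds
;; one 16-byte K[t]+W[t] pair that the round macros read 8 bytes at a time.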


; rotate_Xs
; Rotate values of symbols X0...X7
%macro rotate_Xs 0
%xdefine X_ X0
%xdefine X0 X1
%xdefine X1 X2
%xdefine X2 X3
%xdefine X3 X4
%xdefine X4 X5
%xdefine X5 X6
%xdefine X6 X7
%xdefine X7 X_
%endm

; ROTATE_ARGS
; Rotate values of symbols a...h
%macro ROTATE_ARGS 0
%xdefine TMP_ h
%xdefine h g
%xdefine g f
%xdefine f e
%xdefine e d
%xdefine d c
%xdefine c b
%xdefine b a
%xdefine a TMP_
%endm

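;; Note: rotate_Xs and ROTATE_ARGS above only rename preprocessor symbols,
;; so the schedule registers and working variables "move" between rounds
;; without any actual register-to-register copies.
;;
;; Reference for the message schedule computed by TWO_ROUNDS_AND_SCHED
;; (FIPS 180-4, all values 64-bit):
;;   s0(x) = (x ror 1)  ^ (x ror 8)  ^ (x >> 7)
;;   s1(x) = (x ror 19) ^ (x ror 61) ^ (x >> 6)
;;   W[t]  = W[t-16] + s0(W[t-15]) + W[t-7] + s1(W[t-2])
;; Each invocation interleaves two rounds of the compression function with
;; the computation of two new schedule qwords (one xmm register).
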
%macro TWO_ROUNDS_AND_SCHED 0

        vpalignr XTMP0, X5, X4, 8       ; XTMP0 = W[-7]
        ;; compute s0 and s1 two at a time
        ;; compute W[-16] + W[-7] 2 at a time
        mov     y0, e                   ; y0 = e
        mov     y1, a                   ; y1 = a
        MY_ROR  y0, (41-18)             ; y0 = e >> (41-18)
        vpaddq  XTMP0, XTMP0, X0        ; XTMP0 = W[-7] + W[-16]
        xor     y0, e                   ; y0 = e ^ (e >> (41-18))
        mov     y2, f                   ; y2 = f
        MY_ROR  y1, (39-34)             ; y1 = a >> (39-34)
        ;; compute s0
        vpalignr XTMP1, X1, X0, 8       ; XTMP1 = W[-15]
        xor     y1, a                   ; y1 = a ^ (a >> (39-34))
        MY_ROR  y0, (18-14)             ; y0 = (e >> (18-14)) ^ (e >> (41-14))
        vpsllq  XTMP2, XTMP1, (64-1)
        xor     y2, g                   ; y2 = f^g
        MY_ROR  y1, (34-28)             ; y1 = (a >> (34-28)) ^ (a >> (39-28))
        vpsrlq  XTMP3, XTMP1, 1
        xor     y0, e                   ; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
        and     y2, e                   ; y2 = (f^g)&e
        MY_ROR  y0, 14                  ; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
        vpor    XTMP2, XTMP2, XTMP3     ; XTMP2 = W[-15] ror 1
        xor     y1, a                   ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
        xor     y2, g                   ; y2 = CH = ((f^g)&e)^g
        add     y2, y0                  ; y2 = S1 + CH
        vpsrlq  XTMP3, XTMP1, 8
        add     y2, [rsp + _XFER + 0*8] ; y2 = k + w + S1 + CH
        MY_ROR  y1, 28                  ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
        mov     y0, a                   ; y0 = a
        vpsllq  X0, XTMP1, (64-8)
        add     h, y2                   ; h = h + S1 + CH + k + w
        mov     y2, a                   ; y2 = a
        or      y0, c                   ; y0 = a|c
        vpor    X0, X0, XTMP3
        add     d, h                    ; d = d + t1
        and     y2, c                   ; y2 = a&c
        and     y0, b                   ; y0 = (a|c)&b
        vpsrlq  XTMP1, XTMP1, 7         ; XTMP1 = W[-15] >> 7
        add     h, y1                   ; h = t1 + S0
        or      y0, y2                  ; y0 = MAJ = ((a|c)&b)|(a&c)
        vpxor   XTMP1, XTMP1, XTMP2     ; XTMP1 = W[-15] ror 1 ^ W[-15] ror 8
        add     h, y0                   ; h = t1 + S0 + MAJ
        vpxor   XTMP1, XTMP1, X0        ; XTMP1 = s0


        ROTATE_ARGS
        ;; compute s1
        vpaddq  XTMP0, XTMP0, XTMP1     ; XTMP0 = W[-16] + W[-7] + s0
        mov     y0, e                   ; y0 = e
        mov     y1, a                   ; y1 = a
        MY_ROR  y0, (41-18)             ; y0 = e >> (41-18)
        vpsllq  XTMP3, X7, (64-19)
        xor     y0, e                   ; y0 = e ^ (e >> (41-18))
        mov     y2, f                   ; y2 = f
        MY_ROR  y1, (39-34)             ; y1 = a >> (39-34)
        vpsrlq  X0, X7, 19
        xor     y1, a                   ; y1 = a ^ (a >> (39-34))
        MY_ROR  y0, (18-14)             ; y0 = (e >> (18-14)) ^ (e >> (41-14))
        vpor    XTMP3, XTMP3, X0        ; XTMP3 = W[-2] ror 19
        xor     y2, g                   ; y2 = f^g
        MY_ROR  y1, (34-28)             ; y1 = (a >> (34-28)) ^ (a >> (39-28))
        vpsllq  XTMP2, X7, (64-61)
        xor     y0, e                   ; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
        and     y2, e                   ; y2 = (f^g)&e
        MY_ROR  y0, 14                  ; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
        vpsrlq  XTMP1, X7, 61
        xor     y1, a                   ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
        xor     y2, g                   ; y2 = CH = ((f^g)&e)^g
        add     y2, y0                  ; y2 = S1 + CH
        vpor    XTMP2, XTMP2, XTMP1     ; XTMP2 = W[-2] ror 61
        add     y2, [rsp + _XFER + 1*8] ; y2 = k + w + S1 + CH
        MY_ROR  y1, 28                  ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
        mov     y0, a                   ; y0 = a
        vpsrlq  XTMP1, X7, 6            ; XTMP1 = W[-2] >> 6
        add     h, y2                   ; h = h + S1 + CH + k + w
        mov     y2, a                   ; y2 = a
        or      y0, c                   ; y0 = a|c
        vpxor   XTMP1, XTMP1, XTMP2
        add     d, h                    ; d = d + t1
        and     y2, c                   ; y2 = a&c
        and     y0, b                   ; y0 = (a|c)&b
        vpxor   X0, XTMP3, XTMP1        ; X0 = s1
        add     h, y1                   ; h = t1 + S0
        or      y0, y2                  ; y0 = MAJ = ((a|c)&b)|(a&c)
        add     h, y0                   ; h = t1 + S0 + MAJ
        vpaddq  X0, X0, XTMP0           ; X0 = {W[1], W[0]}

        ROTATE_ARGS
        rotate_Xs
%endm

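;; Reference for a single round as implemented by DO_ROUND and the round
;; halves of TWO_ROUNDS_AND_SCHED (FIPS 180-4):
;;   S1(e) = (e ror 14) ^ (e ror 18) ^ (e ror 41)
;;   S0(a) = (a ror 28) ^ (a ror 34) ^ (a ror 39)
;;   CH(e,f,g)  = (e & f) ^ (~e & g)       ; computed here as ((f^g)&e)^g
;;   MAJ(a,b,c) = (a&b) ^ (a&c) ^ (b&c)    ; computed here as ((a|c)&b)|(a&c)
;;   t1 = h + S1(e) + CH(e,f,g) + K[t] + W[t]
;;   t2 = S0(a) + MAJ(a,b,c)
;;   d += t1; h = t1 + t2; ROTATE_ARGS then renames a..h for the next round
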
;; input is [rsp + _XFER + %1 * 8]
%macro DO_ROUND 1
        mov     y0, e                   ; y0 = e
        MY_ROR  y0, (41-18)             ; y0 = e >> (41-18)
        mov     y1, a                   ; y1 = a
        xor     y0, e                   ; y0 = e ^ (e >> (41-18))
        MY_ROR  y1, (39-34)             ; y1 = a >> (39-34)
        mov     y2, f                   ; y2 = f
        xor     y1, a                   ; y1 = a ^ (a >> (39-34))
        MY_ROR  y0, (18-14)             ; y0 = (e >> (18-14)) ^ (e >> (41-14))
        xor     y2, g                   ; y2 = f^g
        xor     y0, e                   ; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
        MY_ROR  y1, (34-28)             ; y1 = (a >> (34-28)) ^ (a >> (39-28))
        and     y2, e                   ; y2 = (f^g)&e
        xor     y1, a                   ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
        MY_ROR  y0, 14                  ; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
        xor     y2, g                   ; y2 = CH = ((f^g)&e)^g
        add     y2, y0                  ; y2 = S1 + CH
        MY_ROR  y1, 28                  ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
        add     y2, [rsp + _XFER + %1*8] ; y2 = k + w + S1 + CH
        mov     y0, a                   ; y0 = a
        add     h, y2                   ; h = h + S1 + CH + k + w
        mov     y2, a                   ; y2 = a
        or      y0, c                   ; y0 = a|c
        add     d, h                    ; d = d + t1
        and     y2, c                   ; y2 = a&c
        and     y0, b                   ; y0 = (a|c)&b
        add     h, y1                   ; h = t1 + S0
        or      y0, y2                  ; y0 = MAJ = ((a|c)&b)|(a&c)
        add     h, y0                   ; h = t1 + S0 + MAJ
        ROTATE_ARGS
%endm

section .data
default rel
align 64
K512:
        dq 0x428a2f98d728ae22,0x7137449123ef65cd
        dq 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
        dq 0x3956c25bf348b538,0x59f111f1b605d019
        dq 0x923f82a4af194f9b,0xab1c5ed5da6d8118
        dq 0xd807aa98a3030242,0x12835b0145706fbe
        dq 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
        dq 0x72be5d74f27b896f,0x80deb1fe3b1696b1
        dq 0x9bdc06a725c71235,0xc19bf174cf692694
        dq 0xe49b69c19ef14ad2,0xefbe4786384f25e3
        dq 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
        dq 0x2de92c6f592b0275,0x4a7484aa6ea6e483
        dq 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
        dq 0x983e5152ee66dfab,0xa831c66d2db43210
        dq 0xb00327c898fb213f,0xbf597fc7beef0ee4
        dq 0xc6e00bf33da88fc2,0xd5a79147930aa725
        dq 0x06ca6351e003826f,0x142929670a0e6e70
        dq 0x27b70a8546d22ffc,0x2e1b21385c26c926
        dq 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
        dq 0x650a73548baf63de,0x766a0abb3c77b2a8
        dq 0x81c2c92e47edaee6,0x92722c851482353b
        dq 0xa2bfe8a14cf10364,0xa81a664bbc423001
        dq 0xc24b8b70d0f89791,0xc76c51a30654be30
        dq 0xd192e819d6ef5218,0xd69906245565a910
        dq 0xf40e35855771202a,0x106aa07032bbd1b8
        dq 0x19a4c116b8d2d0c8,0x1e376c085141ab53
        dq 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
        dq 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
        dq 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
        dq 0x748f82ee5defb2fc,0x78a5636f43172f60
        dq 0x84c87814a1f0ab72,0x8cc702081a6439ec
        dq 0x90befffa23631e28,0xa4506cebde82bde9
        dq 0xbef9a3f7b2c67915,0xc67178f2e372532b
        dq 0xca273eceea26619c,0xd186b8c721c0c207
        dq 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
        dq 0x06f067aa72176fba,0x0a637dc5a2c898a6
        dq 0x113f9804bef90dae,0x1b710b35131c471b
        dq 0x28db77f523047d84,0x32caab7b40c72493
        dq 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
        dq 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
        dq 0x5fcb6fab3ad6faec,0x6c44198c4a475817

h0:     dq H0
h1:     dq H1
h2:     dq H2
h3:     dq H3
h4:     dq H4
h5:     dq H5
h6:     dq H6
h7:     dq H7

align 16
PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x08090a0b0c0d0e0f0001020304050607
        dq 0x0001020304050607, 0x08090a0b0c0d0e0f

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; void FUNC(void *input_data, UINT64 digest[8])
;; arg 1 : pointer to input data
;; arg 2 : pointer to digest
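;;
;; Notes (informational): the starting state is taken from the h0..h7
;; constants above (the standard SHA-512 IV unless H0..H7 are overridden
;; at build time), so the digest argument is output-only, and exactly one
;; 128-byte block is consumed; no padding is added here. Illustrative
;; call from C, assuming the default FUNC name:
;;   extern void sha512_one_block_avx(void *block, uint64_t digest[8]);
;;   sha512_one_block_avx(block128, digest);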
section .text
MKGLOBAL(FUNC,function,)
align 32
FUNC:
        push    rbx
%ifndef LINUX
        push    rsi
        push    rdi
%endif
        push    rbp
        push    r13
        push    r14
        push    r15

        sub     rsp,STACK_size
%ifndef LINUX
        vmovdqa [rsp + _XMM_SAVE + 0*16],xmm6
        vmovdqa [rsp + _XMM_SAVE + 1*16],xmm7
        vmovdqa [rsp + _XMM_SAVE + 2*16],xmm8
        vmovdqa [rsp + _XMM_SAVE + 3*16],xmm9
        vmovdqa [rsp + _XMM_SAVE + 4*16],xmm10
        vmovdqa [rsp + _XMM_SAVE + 5*16],xmm11
        vmovdqa [rsp + _XMM_SAVE + 6*16],xmm12
        vmovdqa [rsp + _XMM_SAVE + 7*16],xmm13
%endif

        ;; load initial digest
        mov     a,[rel h0]
        mov     b,[rel h1]
        mov     c,[rel h2]
        mov     d,[rel h3]
        mov     e,[rel h4]
        mov     f,[rel h5]
        mov     g,[rel h6]
        mov     h,[rel h7]

        vmovdqa BYTE_FLIP_MASK, [rel PSHUFFLE_BYTE_FLIP_MASK]

        lea     TBL,[rel K512]

        ;; byte swap first 16 qwords
        COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X4, [INP + 4*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X5, [INP + 5*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X6, [INP + 6*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X7, [INP + 7*16], BYTE_FLIP_MASK

        ;; schedule 64 input qwords, by doing 4 iterations of 16 rounds
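        ;; (80 rounds in total: loop1 covers rounds 0-63 while scheduling
        ;; the remaining message qwords; loop2/loop2a below runs the final
        ;; 16 rounds, consuming the last 16 scheduled qwords without
        ;; scheduling any more.)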
        mov     SRND, 4
align 16
loop1:

%assign i 0
%rep 7
        vpaddq  XFER, X0, [TBL + i*16]
        vmovdqa [rsp + _XFER], XFER
        TWO_ROUNDS_AND_SCHED
%assign i (i+1)
%endrep

        vpaddq  XFER, X0, [TBL + 7*16]
        vmovdqa [rsp + _XFER], XFER
        add     TBL, 8*16
        TWO_ROUNDS_AND_SCHED

        sub     SRND, 1
        jne     loop1

        mov     SRND, 2
        jmp     loop2a
loop2:
        vmovdqa X0, X4
        vmovdqa X1, X5
        vmovdqa X2, X6
        vmovdqa X3, X7

loop2a:
        vpaddq  X0, X0, [TBL + 0*16]
        vmovdqa [rsp + _XFER], X0
        DO_ROUND 0
        DO_ROUND 1

        vpaddq  X1, X1, [TBL + 1*16]
        vmovdqa [rsp + _XFER], X1
        DO_ROUND 0
        DO_ROUND 1

        vpaddq  X2, X2, [TBL + 2*16]
        vmovdqa [rsp + _XFER], X2
        DO_ROUND 0
        DO_ROUND 1

        vpaddq  X3, X3, [TBL + 3*16]
        vmovdqa [rsp + _XFER], X3
        add     TBL, 4*16
        DO_ROUND 0
        DO_ROUND 1

        sub     SRND, 1
        jne     loop2

        add     a,[rel h0]
        add     b,[rel h1]
        add     c,[rel h2]
        add     d,[rel h3]
        add     e,[rel h4]
        add     f,[rel h5]
        add     g,[rel h6]
        mov     [8*0 + CTX],a
        mov     [8*1 + CTX],b
        mov     [8*2 + CTX],c
        mov     [8*3 + CTX],d
        mov     [8*4 + CTX],e
        mov     [8*5 + CTX],f
        mov     [8*6 + CTX],g
        add     h,[rel h7]
        mov     [8*7 + CTX],h

done_hash:
%ifndef LINUX
        vmovdqa xmm6,[rsp + _XMM_SAVE + 0*16]
        vmovdqa xmm7,[rsp + _XMM_SAVE + 1*16]
        vmovdqa xmm8,[rsp + _XMM_SAVE + 2*16]
        vmovdqa xmm9,[rsp + _XMM_SAVE + 3*16]
        vmovdqa xmm10,[rsp + _XMM_SAVE + 4*16]
        vmovdqa xmm11,[rsp + _XMM_SAVE + 5*16]
        vmovdqa xmm12,[rsp + _XMM_SAVE + 6*16]
        vmovdqa xmm13,[rsp + _XMM_SAVE + 7*16]
%endif

        add     rsp, STACK_size

        pop     r15
        pop     r14
        pop     r13
        pop     rbp
%ifndef LINUX
        pop     rdi
        pop     rsi
%endif
        pop     rbx

        ret


%ifdef LINUX
section .note.GNU-stack noalloc noexec nowrite progbits
%endif