;;
;; Copyright (c) 2012-2018, Intel Corporation
;;
;; Redistribution and use in source and binary forms, with or without
;; modification, are permitted provided that the following conditions are met:
;;
;;     * Redistributions of source code must retain the above copyright notice,
;;       this list of conditions and the following disclaimer.
;;     * Redistributions in binary form must reproduce the above copyright
;;       notice, this list of conditions and the following disclaimer in the
;;       documentation and/or other materials provided with the distribution.
;;     * Neither the name of Intel Corporation nor the names of its contributors
;;       may be used to endorse or promote products derived from this software
;;       without specific prior written permission.
;;
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;

; This code schedules 1 block at a time, with 4 lanes per block
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%include "os.asm"

%define VMOVDQ vmovdqu ;; assume buffers not aligned

%ifndef FUNC
%define FUNC sha512_block_avx
%endif

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros

%macro MY_ROR 2
        shld %1,%1,(64-(%2))
%endm
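;; Note: with identical destination and source operands, SHLD performs a
;; left rotate; rotating a 64-bit word left by (64 - n) is equivalent to
;; rotating it right by n, which is what the Sigma/sigma terms below need.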

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask
; Load xmm with mem and byte swap each qword
%macro COPY_XMM_AND_BSWAP 3
        VMOVDQ %1, %2
        vpshufb %1, %3
%endmacro

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

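;; X0..X7 hold the 16 most recently scheduled message qwords, two 64-bit
;; words per xmm register; XTMP0..XTMP3 are scratch and XFER stages each
;; W[t]+K[t] pair on its way to the stack.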
%define X0 xmm4
%define X1 xmm5
%define X2 xmm6
%define X3 xmm7
%define X4 xmm8
%define X5 xmm9
%define X6 xmm10
%define X7 xmm11

%define XTMP0 xmm0
%define XTMP1 xmm1
%define XTMP2 xmm2
%define XTMP3 xmm3
%define XFER  xmm13

%define BYTE_FLIP_MASK xmm12

%ifdef LINUX
%define CTX rsi ; 2nd arg
%define INP rdi ; 1st arg

%define SRND rdi ; clobbers INP
%define c rcx
%define d r8
%define e rdx
%else
%define CTX rdx ; 2nd arg
%define INP rcx ; 1st arg

%define SRND rcx ; clobbers INP
%define c rdi
%define d rsi
%define e r8

%endif
%define TBL rbp
%define a rax
%define b rbx

%define f r9
%define g r10
%define h r11

%define y0 r13
%define y1 r14
%define y2 r15

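;; Stack layout: _XMM_SAVE preserves the callee-saved registers xmm6-xmm13
;; on Windows; _XFER is a 16-byte slot holding the current W[t]+K[t] pair
;; consumed by the round macros.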
struc STACK
%ifndef LINUX
_XMM_SAVE: reso 8
%endif
_XFER:     reso 1
endstruc


; rotate_Xs
; Rotate values of symbols X0...X7
%macro rotate_Xs 0
%xdefine X_ X0
%xdefine X0 X1
%xdefine X1 X2
%xdefine X2 X3
%xdefine X3 X4
%xdefine X4 X5
%xdefine X5 X6
%xdefine X6 X7
%xdefine X7 X_
%endm

; ROTATE_ARGS
; Rotate values of symbols a...h
%macro ROTATE_ARGS 0
%xdefine TMP_ h
%xdefine h g
%xdefine g f
%xdefine f e
%xdefine e d
%xdefine d c
%xdefine c b
%xdefine b a
%xdefine a TMP_
%endm

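;; TWO_ROUNDS_AND_SCHED: performs two SHA-512 rounds and, interleaved with
;; them, computes the next two message-schedule words
;;   W[t] = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16]
;; from the X0..X7 window; the caller must have stored the matching W+K
;; pair at [rsp + _XFER] before invoking the macro.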
%macro TWO_ROUNDS_AND_SCHED 0

        vpalignr XTMP0, X5, X4, 8 ; XTMP0 = W[-7]
        ;; compute s0 two at a time and s1 two at a time
        ;; compute W[-16] + W[-7] 2 at a time
        mov y0, e ; y0 = e
        mov y1, a ; y1 = a
        MY_ROR y0, (41-18) ; y0 = e >> (41-18)
        vpaddq XTMP0, XTMP0, X0 ; XTMP0 = W[-7] + W[-16]
        xor y0, e ; y0 = e ^ (e >> (41-18))
        mov y2, f ; y2 = f
        MY_ROR y1, (39-34) ; y1 = a >> (39-34)
        ;; compute s0
        vpalignr XTMP1, X1, X0, 8 ; XTMP1 = W[-15]
        xor y1, a ; y1 = a ^ (a >> (39-34))
        MY_ROR y0, (18-14) ; y0 = (e >> (18-14)) ^ (e >> (41-14))
        vpsllq XTMP2, XTMP1, (64-1)
        xor y2, g ; y2 = f^g
        MY_ROR y1, (34-28) ; y1 = (a >> (34-28)) ^ (a >> (39-28))
        vpsrlq XTMP3, XTMP1, 1
        xor y0, e ; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
        and y2, e ; y2 = (f^g)&e
        MY_ROR y0, 14 ; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
        vpor XTMP2, XTMP2, XTMP3 ; XTMP2 = W[-15] ror 1
        xor y1, a ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
        xor y2, g ; y2 = CH = ((f^g)&e)^g
        add y2, y0 ; y2 = S1 + CH
        vpsrlq XTMP3, XTMP1, 8
        add y2, [rsp + _XFER + 0*8] ; y2 = k + w + S1 + CH
        MY_ROR y1, 28 ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
        mov y0, a ; y0 = a
        vpsllq X0, XTMP1, (64-8)
        add h, y2 ; h = h + S1 + CH + k + w
        mov y2, a ; y2 = a
        or y0, c ; y0 = a|c
        vpor X0, X0, XTMP3
        add d, h ; d = d + t1
        and y2, c ; y2 = a&c
        and y0, b ; y0 = (a|c)&b
        vpsrlq XTMP1, XTMP1, 7 ; XTMP1 = W[-15] >> 7
        add h, y1 ; h = t1 + S0
        or y0, y2 ; y0 = MAJ = ((a|c)&b)|(a&c)
        vpxor XTMP1, XTMP1, XTMP2 ; XTMP1 = W[-15] ror 1 ^ W[-15] ror 8
        add h, y0 ; h = t1 + S0 + MAJ
        vpxor XTMP1, XTMP1, X0 ; XTMP1 = s0


ROTATE_ARGS
        ;; compute s1
        vpaddq XTMP0, XTMP0, XTMP1 ; XTMP0 = W[-16] + W[-7] + s0
        mov y0, e ; y0 = e
        mov y1, a ; y1 = a
        MY_ROR y0, (41-18) ; y0 = e >> (41-18)
        vpsllq XTMP3, X7, (64-19)
        xor y0, e ; y0 = e ^ (e >> (41-18))
        mov y2, f ; y2 = f
        MY_ROR y1, (39-34) ; y1 = a >> (39-34)
        vpsrlq X0, X7, 19
        xor y1, a ; y1 = a ^ (a >> (39-34))
        MY_ROR y0, (18-14) ; y0 = (e >> (18-14)) ^ (e >> (41-14))
        vpor XTMP3, XTMP3, X0 ; XTMP3 = W[-2] ror 19
        xor y2, g ; y2 = f^g
        MY_ROR y1, (34-28) ; y1 = (a >> (34-28)) ^ (a >> (39-28))
        vpsllq XTMP2, X7, (64-61)
        xor y0, e ; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
        and y2, e ; y2 = (f^g)&e
        MY_ROR y0, 14 ; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
        vpsrlq XTMP1, X7, 61
        xor y1, a ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
        xor y2, g ; y2 = CH = ((f^g)&e)^g
        add y2, y0 ; y2 = S1 + CH
        vpor XTMP2, XTMP2, XTMP1 ; XTMP2 = W[-2] ror 61
        add y2, [rsp + _XFER + 1*8] ; y2 = k + w + S1 + CH
        MY_ROR y1, 28 ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
        mov y0, a ; y0 = a
        vpsrlq XTMP1, X7, 6 ; XTMP1 = W[-2] >> 6
        add h, y2 ; h = h + S1 + CH + k + w
        mov y2, a ; y2 = a
        or y0, c ; y0 = a|c
        vpxor XTMP1, XTMP1, XTMP2
        add d, h ; d = d + t1
        and y2, c ; y2 = a&c
        and y0, b ; y0 = (a|c)&b
        vpxor X0, XTMP3, XTMP1 ; X0 = s1
        add h, y1 ; h = t1 + S0
        or y0, y2 ; y0 = MAJ = ((a|c)&b)|(a&c)
        add h, y0 ; h = t1 + S0 + MAJ
        vpaddq X0, X0, XTMP0 ; X0 = {W[1], W[0]}

ROTATE_ARGS
rotate_Xs
%endm

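;; DO_ROUND: one plain SHA-512 round (no message scheduling), used for the
;; final 16 rounds once all 80 schedule words have been produced.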
;; input is [rsp + _XFER + %1 * 8]
%macro DO_ROUND 1
        mov y0, e ; y0 = e
        MY_ROR y0, (41-18) ; y0 = e >> (41-18)
        mov y1, a ; y1 = a
        xor y0, e ; y0 = e ^ (e >> (41-18))
        MY_ROR y1, (39-34) ; y1 = a >> (39-34)
        mov y2, f ; y2 = f
        xor y1, a ; y1 = a ^ (a >> (39-34))
        MY_ROR y0, (18-14) ; y0 = (e >> (18-14)) ^ (e >> (41-14))
        xor y2, g ; y2 = f^g
        xor y0, e ; y0 = e ^ (e >> (18-14)) ^ (e >> (41-14))
        MY_ROR y1, (34-28) ; y1 = (a >> (34-28)) ^ (a >> (39-28))
        and y2, e ; y2 = (f^g)&e
        xor y1, a ; y1 = a ^ (a >> (34-28)) ^ (a >> (39-28))
        MY_ROR y0, 14 ; y0 = S1 = (e>>14) ^ (e>>18) ^ (e>>41)
        xor y2, g ; y2 = CH = ((f^g)&e)^g
        add y2, y0 ; y2 = S1 + CH
        MY_ROR y1, 28 ; y1 = S0 = (a>>28) ^ (a>>34) ^ (a>>39)
        add y2, [rsp + _XFER + %1*8] ; y2 = k + w + S1 + CH
        mov y0, a ; y0 = a
        add h, y2 ; h = h + S1 + CH + k + w
        mov y2, a ; y2 = a
        or y0, c ; y0 = a|c
        add d, h ; d = d + t1
        and y2, c ; y2 = a&c
        and y0, b ; y0 = (a|c)&b
        add h, y1 ; h = t1 + S0
        or y0, y2 ; y0 = MAJ = ((a|c)&b)|(a&c)
        add h, y0 ; h = t1 + S0 + MAJ
        ROTATE_ARGS
%endm

section .data
default rel
align 64
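;; K512: the 80 SHA-512 round constants (first 64 bits of the fractional
;; parts of the cube roots of the first 80 primes), two per 16-byte entry
;; so that one xmm load covers a round pair.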
K512:
        dq 0x428a2f98d728ae22,0x7137449123ef65cd
        dq 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
        dq 0x3956c25bf348b538,0x59f111f1b605d019
        dq 0x923f82a4af194f9b,0xab1c5ed5da6d8118
        dq 0xd807aa98a3030242,0x12835b0145706fbe
        dq 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
        dq 0x72be5d74f27b896f,0x80deb1fe3b1696b1
        dq 0x9bdc06a725c71235,0xc19bf174cf692694
        dq 0xe49b69c19ef14ad2,0xefbe4786384f25e3
        dq 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
        dq 0x2de92c6f592b0275,0x4a7484aa6ea6e483
        dq 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
        dq 0x983e5152ee66dfab,0xa831c66d2db43210
        dq 0xb00327c898fb213f,0xbf597fc7beef0ee4
        dq 0xc6e00bf33da88fc2,0xd5a79147930aa725
        dq 0x06ca6351e003826f,0x142929670a0e6e70
        dq 0x27b70a8546d22ffc,0x2e1b21385c26c926
        dq 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
        dq 0x650a73548baf63de,0x766a0abb3c77b2a8
        dq 0x81c2c92e47edaee6,0x92722c851482353b
        dq 0xa2bfe8a14cf10364,0xa81a664bbc423001
        dq 0xc24b8b70d0f89791,0xc76c51a30654be30
        dq 0xd192e819d6ef5218,0xd69906245565a910
        dq 0xf40e35855771202a,0x106aa07032bbd1b8
        dq 0x19a4c116b8d2d0c8,0x1e376c085141ab53
        dq 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
        dq 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
        dq 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
        dq 0x748f82ee5defb2fc,0x78a5636f43172f60
        dq 0x84c87814a1f0ab72,0x8cc702081a6439ec
        dq 0x90befffa23631e28,0xa4506cebde82bde9
        dq 0xbef9a3f7b2c67915,0xc67178f2e372532b
        dq 0xca273eceea26619c,0xd186b8c721c0c207
        dq 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
        dq 0x06f067aa72176fba,0x0a637dc5a2c898a6
        dq 0x113f9804bef90dae,0x1b710b35131c471b
        dq 0x28db77f523047d84,0x32caab7b40c72493
        dq 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
        dq 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
        dq 0x5fcb6fab3ad6faec,0x6c44198c4a475817

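;; pshufb mask that reverses the byte order within each 64-bit lane,
;; converting the big-endian message words of the input block into the
;; little-endian order used by the arithmetic below.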
align 16
PSHUFFLE_BYTE_FLIP_MASK: ;ddq 0x08090a0b0c0d0e0f0001020304050607
        dq 0x0001020304050607, 0x08090a0b0c0d0e0f


;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; void FUNC(void *input_data, UINT64 digest[8])
;; arg 1 : pointer to input data
;; arg 2 : pointer to digest
section .text
MKGLOBAL(FUNC,function,)
align 32
FUNC:
        push rbx
%ifndef LINUX
        push rsi
        push rdi
%endif
        push rbp
        push r13
        push r14
        push r15

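        ;; allocate the local frame; on Windows, xmm6-xmm15 are callee-saved,
        ;; so the xmm registers clobbered here (xmm6-xmm13) are spilled to
        ;; _XMM_SAVE and restored before returning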
        sub rsp,STACK_size
%ifndef LINUX
        vmovdqa [rsp + _XMM_SAVE + 0*16],xmm6
        vmovdqa [rsp + _XMM_SAVE + 1*16],xmm7
        vmovdqa [rsp + _XMM_SAVE + 2*16],xmm8
        vmovdqa [rsp + _XMM_SAVE + 3*16],xmm9
        vmovdqa [rsp + _XMM_SAVE + 4*16],xmm10
        vmovdqa [rsp + _XMM_SAVE + 5*16],xmm11
        vmovdqa [rsp + _XMM_SAVE + 6*16],xmm12
        vmovdqa [rsp + _XMM_SAVE + 7*16],xmm13
%endif

        ;; load initial digest
        mov a, [8*0 + CTX]
        mov b, [8*1 + CTX]
        mov c, [8*2 + CTX]
        mov d, [8*3 + CTX]
        mov e, [8*4 + CTX]
        mov f, [8*5 + CTX]
        mov g, [8*6 + CTX]
        mov h, [8*7 + CTX]

        vmovdqa BYTE_FLIP_MASK, [rel PSHUFFLE_BYTE_FLIP_MASK]

        lea TBL,[rel K512]

        ;; byte swap first 16 qwords
        COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X4, [INP + 4*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X5, [INP + 5*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X6, [INP + 6*16], BYTE_FLIP_MASK
        COPY_XMM_AND_BSWAP X7, [INP + 7*16], BYTE_FLIP_MASK

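        ;; Round structure: SHA-512 has 80 rounds. loop1 runs 4 times at 16
        ;; rounds per pass (8 x TWO_ROUNDS_AND_SCHED), covering rounds 0-63
        ;; while scheduling ahead; the remaining 16 rounds are handled below
        ;; in loop2/loop2a from the words already sitting in X0..X7.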
        ;; schedule 64 input qwords, by doing 4 iterations of 16 rounds
        mov SRND, 4
align 16
loop1:

%assign i 0
%rep 7
        vpaddq XFER, X0, [TBL + i*16]
        vmovdqa [rsp + _XFER], XFER
        TWO_ROUNDS_AND_SCHED
%assign i (i+1)
%endrep

        vpaddq XFER, X0, [TBL + 7*16]
        vmovdqa [rsp + _XFER], XFER
        add TBL, 8*16
        TWO_ROUNDS_AND_SCHED

        sub SRND, 1
        jne loop1

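        ;; final 16 rounds: no further scheduling is needed, so each pass adds
        ;; the round constants to four xmm registers of already-computed W
        ;; values and runs 8 plain rounds; two passes (SRND = 2) cover 64-79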
        mov SRND, 2
        jmp loop2a
loop2:
        vmovdqa X0, X4
        vmovdqa X1, X5
        vmovdqa X2, X6
        vmovdqa X3, X7

loop2a:
        vpaddq X0, X0, [TBL + 0*16]
        vmovdqa [rsp + _XFER], X0
        DO_ROUND 0
        DO_ROUND 1

        vpaddq X1, X1, [TBL + 1*16]
        vmovdqa [rsp + _XFER], X1
        DO_ROUND 0
        DO_ROUND 1

        vpaddq X2, X2, [TBL + 2*16]
        vmovdqa [rsp + _XFER], X2
        DO_ROUND 0
        DO_ROUND 1

        vpaddq X3, X3, [TBL + 3*16]
        vmovdqa [rsp + _XFER], X3
        add TBL, 4*16
        DO_ROUND 0
        DO_ROUND 1

        sub SRND, 1
        jne loop2

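        ;; add this block's working variables back into the caller's digest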
        add [8*0 + CTX], a
        add [8*1 + CTX], b
        add [8*2 + CTX], c
        add [8*3 + CTX], d
        add [8*4 + CTX], e
        add [8*5 + CTX], f
        add [8*6 + CTX], g
        add [8*7 + CTX], h

done_hash:
%ifndef LINUX
        vmovdqa xmm6,[rsp + _XMM_SAVE + 0*16]
        vmovdqa xmm7,[rsp + _XMM_SAVE + 1*16]
        vmovdqa xmm8,[rsp + _XMM_SAVE + 2*16]
        vmovdqa xmm9,[rsp + _XMM_SAVE + 3*16]
        vmovdqa xmm10,[rsp + _XMM_SAVE + 4*16]
        vmovdqa xmm11,[rsp + _XMM_SAVE + 5*16]
        vmovdqa xmm12,[rsp + _XMM_SAVE + 6*16]
        vmovdqa xmm13,[rsp + _XMM_SAVE + 7*16]
%endif

        add rsp, STACK_size

        pop r15
        pop r14
        pop r13
        pop rbp
%ifndef LINUX
        pop rdi
        pop rsi
%endif
        pop rbx

        ret


%ifdef LINUX
section .note.GNU-stack noalloc noexec nowrite progbits
%endif