/*
 * Multi-buffer SHA512 algorithm hash compute routine
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *     Megha Dey <megha.dey@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

# code to compute quad SHA512 using AVX2
# use YMMs to tackle the larger digest size
# outer calling routine takes care of save and restore of XMM registers
# Logic designed/laid out by JDG

# Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
# Stack must be aligned to 32 bytes before call
# Linux clobbers:  rax rbx rsi r8 r9 r10 r11 r12
# Linux preserves: rcx rdx rdi rbp r13 r14 r15
# clobbers ymm0-15

#include <linux/linkage.h>
#include "sha512_mb_mgr_datastruct.S"

arg1 = %rdi
arg2 = %rsi

# Common definitions
STATE = arg1
INP_SIZE = arg2

IDX = %rax
ROUND = %rbx
TBL = %r8

inp0 = %r9
inp1 = %r10
inp2 = %r11
inp3 = %r12

a = %ymm0
b = %ymm1
c = %ymm2
d = %ymm3
e = %ymm4
f = %ymm5
g = %ymm6
h = %ymm7

a0 = %ymm8
a1 = %ymm9
a2 = %ymm10

TT0 = %ymm14
TT1 = %ymm13
TT2 = %ymm12
TT3 = %ymm11
TT4 = %ymm10
TT5 = %ymm9

T1 = %ymm14
TMP = %ymm15

# Define stack usage
STACK_SPACE1 = SZ4*16 + NUM_SHA512_DIGEST_WORDS*SZ4 + 24

#define VMOVPD vmovupd
_digest = SZ4*16

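# Reader's note on the stack layout (offsets relative to %rsp after the
# prologue below); the sizes assume SZ4 = 4 lanes * 8 bytes = 32 and
# NUM_SHA512_DIGEST_WORDS = 8 as defined in sha512_mb_mgr_datastruct.S:
#
#   [0       .. SZ4*16)           16-entry circular W[] schedule, one
#                                 SZ4-byte vector (all four lanes) per entry
#   [_digest .. _digest+8*SZ4)    digest a..h saved at the top of each block
#                                 so it can be re-added afterwards
#   final 24 bytes                padding that, together with the return
#                                 address and the four pushes, restores the
#                                 required 32-byte stack alignment
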
# transpose r0, r1, r2, r3, t0, t1
# "transpose" data in {r0..r3} using temps {t0..t1}
# Input looks like: {r0 r1 r2 r3}
# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
#
# output looks like: {t0 r1 r0 r3}
# t0 = {d1 d0 c1 c0 b1 b0 a1 a0}
# r1 = {d3 d2 c3 c2 b3 b2 a3 a2}
# r0 = {d5 d4 c5 c4 b5 b4 a5 a4}
# r3 = {d7 d6 c7 c6 b7 b6 a7 a6}

.macro TRANSPOSE r0 r1 r2 r3 t0 t1
        vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
        vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
        vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
        vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}

        vperm2f128 $0x20, \r2, \r0, \r1 # r1 = {d3 d2 c3 c2 b3 b2 a3 a2}
        vperm2f128 $0x31, \r2, \r0, \r3 # r3 = {d7 d6 c7 c6 b7 b6 a7 a6}
        vperm2f128 $0x31, \t1, \t0, \r0 # r0 = {d5 d4 c5 c4 b5 b4 a5 a4}
        vperm2f128 $0x20, \t1, \t0, \t0 # t0 = {d1 d0 c1 c0 b1 b0 a1 a0}
.endm

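# Reader's note on the transpose result: on input each register holds the
# next 32 message bytes (four 64-bit words) of one lane; on output each
# register holds the same word index from all four lanes (t0 = word 0 of
# lanes a..d, r1 = word 1, r0 = word 2, r3 = word 3), which is the layout
# the vectorised rounds below consume.  The a0..d7 element names above are
# 32-bit halves, so lane a's first 64-bit word is the pair {a1 a0}, etc.
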
.macro ROTATE_ARGS
TMP_ = h
h = g
g = f
f = e
e = d
d = c
c = b
b = a
a = TMP_
.endm

# PRORQ reg, imm, tmp
# packed quadword (64-bit) rotate right
# does a rotate by doing two shifts and an or
.macro _PRORQ reg imm tmp
        vpsllq $(64-\imm), \reg, \tmp
        vpsrlq $\imm, \reg, \reg
        vpor \tmp, \reg, \reg
.endm

# non-destructive
# PRORQ_nd reg, imm, tmp, src
.macro _PRORQ_nd reg imm tmp src
        vpsllq $(64-\imm), \src, \tmp
        vpsrlq $\imm, \src, \reg
        vpor \tmp, \reg, \reg
.endm

# PRORQ dst/src, amt
.macro PRORQ reg imm
        _PRORQ \reg, \imm, TMP
.endm

# PRORQ_nd dst, src, amt
.macro PRORQ_nd reg tmp imm
        _PRORQ_nd \reg, \imm, TMP, \tmp
.endm

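# Reader's note: AVX2 has no packed 64-bit rotate instruction, so both
# helpers build the rotate from the usual shift/or identity, applied to
# every lane at once:
#
#     ROTR64(x, n) = (x >> n) | (x << (64 - n))
#
# e.g. "PRORQ_nd a1, e, 41" leaves a1 = ROTR64(e, 41) while keeping e
# intact, whereas "PRORQ reg, imm" rotates reg in place (TMP is scratch).
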
#; arguments passed implicitly in preprocessor symbols i, a...h
.macro ROUND_00_15 _T1 i
        PRORQ_nd a0, e, (18-14)   # sig1: a0 = (e >> 4)

        vpxor g, f, a2            # ch: a2 = f^g
        vpand e, a2, a2           # ch: a2 = (f^g)&e
        vpxor g, a2, a2           # a2 = ch

        PRORQ_nd a1, e, 41        # sig1: a1 = (e >> 41)

        offset = SZ4*(\i & 0xf)
        vmovdqu \_T1, offset(%rsp)        # save W[i] in the on-stack schedule
        vpaddq (TBL,ROUND,1), \_T1, \_T1  # T1 = W + K
        vpxor e, a0, a0           # sig1: a0 = e ^ (e >> 4)
        PRORQ a0, 14              # sig1: a0 = (e >> 14) ^ (e >> 18)
        vpaddq a2, h, h           # h = h + ch
        PRORQ_nd a2, a, 6         # sig0: a2 = (a >> 6)
        vpaddq \_T1, h, h         # h = h + ch + W + K
        vpxor a1, a0, a0          # a0 = sigma1
        vmovdqu a, \_T1
        PRORQ_nd a1, a, 39        # sig0: a1 = (a >> 39)
        vpxor c, \_T1, \_T1       # maj: T1 = a^c
        add $SZ4, ROUND           # ROUND++
        vpand b, \_T1, \_T1       # maj: T1 = (a^c)&b
        vpaddq a0, h, h           # h = h + ch + W + K + sigma1
        vpaddq h, d, d            # d = d + T1 (h holds T1 at this point)
        vpxor a, a2, a2           # sig0: a2 = a ^ (a >> 6)
        PRORQ a2, 28              # sig0: a2 = (a >> 28) ^ (a >> 34)
        vpxor a1, a2, a2          # a2 = sig0
        vpand c, a, a1            # maj: a1 = a&c
        vpor \_T1, a1, a1         # a1 = maj
        vpaddq a1, h, h           # h = h + ch + W + K + sigma1 + maj
        vpaddq a2, h, h           # h = h + ch + W + K + sigma1 + maj + sigma0
        ROTATE_ARGS
.endm

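# Reader's summary of ROUND_00_15 in FIPS 180-4 terms (every operation is
# applied lane-wise to the four buffers; ">>" in the comments above denotes
# a 64-bit rotate):
#
#     Ch(e,f,g)  = (e & f) ^ (~e & g)           computed as ((f^g) & e) ^ g
#     Sigma1(e)  = ROTR(e,14) ^ ROTR(e,18) ^ ROTR(e,41)
#     Maj(a,b,c) = (a & b) ^ (a & c) ^ (b & c)  computed as ((a^c) & b) | (a & c)
#     Sigma0(a)  = ROTR(a,28) ^ ROTR(a,34) ^ ROTR(a,39)
#     T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i]
#     d  = d + T1
#     h  = T1 + Sigma0(a) + Maj(a,b,c)
#
# The macro also stores W[i] into the 16-entry stack buffer and advances
# ROUND by SZ4; ROTATE_ARGS then renames the registers so the updated h
# becomes a for the next round.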

#; arguments passed implicitly in preprocessor symbols i, a...h
.macro ROUND_16_XX _T1 i
        vmovdqu SZ4*((\i-15)&0xf)(%rsp), \_T1   # _T1 = W[i-15]
        vmovdqu SZ4*((\i-2)&0xf)(%rsp), a1      # a1  = W[i-2]
        vmovdqu \_T1, a0
        PRORQ \_T1, 7
        vmovdqu a1, a2
        PRORQ a1, 42
        vpxor a0, \_T1, \_T1      # _T1 = W[i-15] ^ (W[i-15] >> 7)
        PRORQ \_T1, 1             # _T1 = (W[i-15] >> 1) ^ (W[i-15] >> 8)
        vpxor a2, a1, a1          # a1  = W[i-2] ^ (W[i-2] >> 42)
        PRORQ a1, 19              # a1  = (W[i-2] >> 19) ^ (W[i-2] >> 61)
        vpsrlq $7, a0, a0         # a0  = W[i-15] shifted right by 7
        vpxor a0, \_T1, \_T1      # _T1 = sigma0(W[i-15])
        vpsrlq $6, a2, a2         # a2  = W[i-2] shifted right by 6
        vpxor a2, a1, a1          # a1  = sigma1(W[i-2])
        vpaddq SZ4*((\i-16)&0xf)(%rsp), \_T1, \_T1      # _T1 += W[i-16]
        vpaddq SZ4*((\i-7)&0xf)(%rsp), a1, a1           # a1  += W[i-7]
        vpaddq a1, \_T1, \_T1     # _T1 = W[i]

        ROUND_00_15 \_T1, \i
.endm

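# Reader's summary of ROUND_16_XX: rounds 16..79 first extend the message
# schedule and then fall through to ROUND_00_15 with the new word, again
# lane-wise per FIPS 180-4:
#
#     sigma0(x) = ROTR(x,1)  ^ ROTR(x,8)  ^ (x >> 7)
#     sigma1(x) = ROTR(x,19) ^ ROTR(x,61) ^ (x >> 6)
#     W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]
#
# Only the most recent 16 schedule entries are ever needed, hence the
# "& 0xf" index masking into the circular buffer at the bottom of the stack.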

# void sha512_x4_avx2(void *STATE, const int INP_SIZE)
# arg 1 : STATE    : pointer to the args structure holding the transposed
#                    digest and the four input data pointers
# arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
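# Reader's sketch of the STATE layout, as implied by the offsets used below
# (the authoritative definition lives in sha512_mb_mgr_datastruct.S; the C
# names here are illustrative only):
#
#     struct sha512_x4_state {          /* hypothetical C view              */
#             u64 digest[8][4];         /* row r = digest word r of all     */
#                                       /* four lanes                       */
#             u8  *data_ptr[4];         /* per-lane input pointer, advanced */
#                                       /* one 128-byte block at a time     */
#     };
#
# SHA512_DIGEST_ROW_SIZE is the byte size of one digest row and _data_ptr
# is the byte offset of the pointer array within the structure.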
ENTRY(sha512_x4_avx2)
        # general registers preserved in outer calling routine
        # outer calling routine saves all the XMM registers
        # save callee-saved clobbered registers to comply with C function ABI
        push %r12
        push %r13
        push %r14
        push %r15

        sub $STACK_SPACE1, %rsp

        # Load the pre-transposed incoming digest.
        vmovdqu 0*SHA512_DIGEST_ROW_SIZE(STATE), a
        vmovdqu 1*SHA512_DIGEST_ROW_SIZE(STATE), b
        vmovdqu 2*SHA512_DIGEST_ROW_SIZE(STATE), c
        vmovdqu 3*SHA512_DIGEST_ROW_SIZE(STATE), d
        vmovdqu 4*SHA512_DIGEST_ROW_SIZE(STATE), e
        vmovdqu 5*SHA512_DIGEST_ROW_SIZE(STATE), f
        vmovdqu 6*SHA512_DIGEST_ROW_SIZE(STATE), g
        vmovdqu 7*SHA512_DIGEST_ROW_SIZE(STATE), h

        lea K512_4(%rip), TBL

        # load the address of each of the 4 message lanes
        # getting ready to transpose input onto stack
        mov _data_ptr+0*PTR_SZ(STATE), inp0
        mov _data_ptr+1*PTR_SZ(STATE), inp1
        mov _data_ptr+2*PTR_SZ(STATE), inp2
        mov _data_ptr+3*PTR_SZ(STATE), inp3

        xor IDX, IDX
lloop:
        xor ROUND, ROUND

        # save old digest
        vmovdqu a, _digest(%rsp)
        vmovdqu b, _digest+1*SZ4(%rsp)
        vmovdqu c, _digest+2*SZ4(%rsp)
        vmovdqu d, _digest+3*SZ4(%rsp)
        vmovdqu e, _digest+4*SZ4(%rsp)
        vmovdqu f, _digest+5*SZ4(%rsp)
        vmovdqu g, _digest+6*SZ4(%rsp)
        vmovdqu h, _digest+7*SZ4(%rsp)
        i = 0
.rep 4
        # reload the byte-swap mask; TMP is clobbered by PRORQ in the rounds
        vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP
        VMOVPD i*32(inp0, IDX), TT2
        VMOVPD i*32(inp1, IDX), TT1
        VMOVPD i*32(inp2, IDX), TT4
        VMOVPD i*32(inp3, IDX), TT3
        TRANSPOSE TT2, TT1, TT4, TT3, TT0, TT5
        # swap the message words to big-endian byte order
        vpshufb TMP, TT0, TT0
        vpshufb TMP, TT1, TT1
        vpshufb TMP, TT2, TT2
        vpshufb TMP, TT3, TT3
        ROUND_00_15 TT0,(i*4+0)
        ROUND_00_15 TT1,(i*4+1)
        ROUND_00_15 TT2,(i*4+2)
        ROUND_00_15 TT3,(i*4+3)
        i = (i+1)
.endr
        add $128, IDX   # advance past the 128-byte block just read per lane

        i = (i*4)       # first message-schedule round index (16)

        jmp Lrounds_16_xx
.align 16
Lrounds_16_xx:
.rep 16
        ROUND_16_XX T1, i
        i = (i+1)
.endr
        cmp $0xa00, ROUND       # 80 rounds x SZ4 bytes of K512_4 per round
        jb Lrounds_16_xx

        # add old digest
        vpaddq _digest(%rsp), a, a
        vpaddq _digest+1*SZ4(%rsp), b, b
        vpaddq _digest+2*SZ4(%rsp), c, c
        vpaddq _digest+3*SZ4(%rsp), d, d
        vpaddq _digest+4*SZ4(%rsp), e, e
        vpaddq _digest+5*SZ4(%rsp), f, f
        vpaddq _digest+6*SZ4(%rsp), g, g
        vpaddq _digest+7*SZ4(%rsp), h, h

        sub $1, INP_SIZE        # unit is blocks
        jne lloop

        # write back to memory (state object) the transposed digest
        vmovdqu a, 0*SHA512_DIGEST_ROW_SIZE(STATE)
        vmovdqu b, 1*SHA512_DIGEST_ROW_SIZE(STATE)
        vmovdqu c, 2*SHA512_DIGEST_ROW_SIZE(STATE)
        vmovdqu d, 3*SHA512_DIGEST_ROW_SIZE(STATE)
        vmovdqu e, 4*SHA512_DIGEST_ROW_SIZE(STATE)
        vmovdqu f, 5*SHA512_DIGEST_ROW_SIZE(STATE)
        vmovdqu g, 6*SHA512_DIGEST_ROW_SIZE(STATE)
        vmovdqu h, 7*SHA512_DIGEST_ROW_SIZE(STATE)

        # update input data pointers
        add IDX, inp0
        mov inp0, _data_ptr+0*PTR_SZ(STATE)
        add IDX, inp1
        mov inp1, _data_ptr+1*PTR_SZ(STATE)
        add IDX, inp2
        mov inp2, _data_ptr+2*PTR_SZ(STATE)
        add IDX, inp3
        mov inp3, _data_ptr+3*PTR_SZ(STATE)

        #;;;;;;;;;;;;;;;
        #; Postamble
        add $STACK_SPACE1, %rsp
        # restore callee-saved clobbered registers

        pop %r15
        pop %r14
        pop %r13
        pop %r12

        # outer calling routine restores XMM and other GP registers
        ret
ENDPROC(sha512_x4_avx2)

.section .rodata.K512_4, "a", @progbits
.align 64
# each 64-bit SHA-512 round constant is replicated four times so that one
# 32-byte load feeds all four lanes
K512_4:
        .octa 0x428a2f98d728ae22428a2f98d728ae22, 0x428a2f98d728ae22428a2f98d728ae22
        .octa 0x7137449123ef65cd7137449123ef65cd, 0x7137449123ef65cd7137449123ef65cd
        .octa 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f, 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f
        .octa 0xe9b5dba58189dbbce9b5dba58189dbbc, 0xe9b5dba58189dbbce9b5dba58189dbbc
        .octa 0x3956c25bf348b5383956c25bf348b538, 0x3956c25bf348b5383956c25bf348b538
        .octa 0x59f111f1b605d01959f111f1b605d019, 0x59f111f1b605d01959f111f1b605d019
        .octa 0x923f82a4af194f9b923f82a4af194f9b, 0x923f82a4af194f9b923f82a4af194f9b
        .octa 0xab1c5ed5da6d8118ab1c5ed5da6d8118, 0xab1c5ed5da6d8118ab1c5ed5da6d8118
        .octa 0xd807aa98a3030242d807aa98a3030242, 0xd807aa98a3030242d807aa98a3030242
        .octa 0x12835b0145706fbe12835b0145706fbe, 0x12835b0145706fbe12835b0145706fbe
        .octa 0x243185be4ee4b28c243185be4ee4b28c, 0x243185be4ee4b28c243185be4ee4b28c
        .octa 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2, 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2
        .octa 0x72be5d74f27b896f72be5d74f27b896f, 0x72be5d74f27b896f72be5d74f27b896f
        .octa 0x80deb1fe3b1696b180deb1fe3b1696b1, 0x80deb1fe3b1696b180deb1fe3b1696b1
        .octa 0x9bdc06a725c712359bdc06a725c71235, 0x9bdc06a725c712359bdc06a725c71235
        .octa 0xc19bf174cf692694c19bf174cf692694, 0xc19bf174cf692694c19bf174cf692694
        .octa 0xe49b69c19ef14ad2e49b69c19ef14ad2, 0xe49b69c19ef14ad2e49b69c19ef14ad2
        .octa 0xefbe4786384f25e3efbe4786384f25e3, 0xefbe4786384f25e3efbe4786384f25e3
        .octa 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5, 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5
        .octa 0x240ca1cc77ac9c65240ca1cc77ac9c65, 0x240ca1cc77ac9c65240ca1cc77ac9c65
        .octa 0x2de92c6f592b02752de92c6f592b0275, 0x2de92c6f592b02752de92c6f592b0275
        .octa 0x4a7484aa6ea6e4834a7484aa6ea6e483, 0x4a7484aa6ea6e4834a7484aa6ea6e483
        .octa 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4, 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4
        .octa 0x76f988da831153b576f988da831153b5, 0x76f988da831153b576f988da831153b5
        .octa 0x983e5152ee66dfab983e5152ee66dfab, 0x983e5152ee66dfab983e5152ee66dfab
        .octa 0xa831c66d2db43210a831c66d2db43210, 0xa831c66d2db43210a831c66d2db43210
        .octa 0xb00327c898fb213fb00327c898fb213f, 0xb00327c898fb213fb00327c898fb213f
        .octa 0xbf597fc7beef0ee4bf597fc7beef0ee4, 0xbf597fc7beef0ee4bf597fc7beef0ee4
        .octa 0xc6e00bf33da88fc2c6e00bf33da88fc2, 0xc6e00bf33da88fc2c6e00bf33da88fc2
        .octa 0xd5a79147930aa725d5a79147930aa725, 0xd5a79147930aa725d5a79147930aa725
        .octa 0x06ca6351e003826f06ca6351e003826f, 0x06ca6351e003826f06ca6351e003826f
        .octa 0x142929670a0e6e70142929670a0e6e70, 0x142929670a0e6e70142929670a0e6e70
        .octa 0x27b70a8546d22ffc27b70a8546d22ffc, 0x27b70a8546d22ffc27b70a8546d22ffc
        .octa 0x2e1b21385c26c9262e1b21385c26c926, 0x2e1b21385c26c9262e1b21385c26c926
        .octa 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed, 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed
        .octa 0x53380d139d95b3df53380d139d95b3df, 0x53380d139d95b3df53380d139d95b3df
        .octa 0x650a73548baf63de650a73548baf63de, 0x650a73548baf63de650a73548baf63de
        .octa 0x766a0abb3c77b2a8766a0abb3c77b2a8, 0x766a0abb3c77b2a8766a0abb3c77b2a8
        .octa 0x81c2c92e47edaee681c2c92e47edaee6, 0x81c2c92e47edaee681c2c92e47edaee6
        .octa 0x92722c851482353b92722c851482353b, 0x92722c851482353b92722c851482353b
        .octa 0xa2bfe8a14cf10364a2bfe8a14cf10364, 0xa2bfe8a14cf10364a2bfe8a14cf10364
        .octa 0xa81a664bbc423001a81a664bbc423001, 0xa81a664bbc423001a81a664bbc423001
        .octa 0xc24b8b70d0f89791c24b8b70d0f89791, 0xc24b8b70d0f89791c24b8b70d0f89791
        .octa 0xc76c51a30654be30c76c51a30654be30, 0xc76c51a30654be30c76c51a30654be30
        .octa 0xd192e819d6ef5218d192e819d6ef5218, 0xd192e819d6ef5218d192e819d6ef5218
        .octa 0xd69906245565a910d69906245565a910, 0xd69906245565a910d69906245565a910
        .octa 0xf40e35855771202af40e35855771202a, 0xf40e35855771202af40e35855771202a
        .octa 0x106aa07032bbd1b8106aa07032bbd1b8, 0x106aa07032bbd1b8106aa07032bbd1b8
        .octa 0x19a4c116b8d2d0c819a4c116b8d2d0c8, 0x19a4c116b8d2d0c819a4c116b8d2d0c8
        .octa 0x1e376c085141ab531e376c085141ab53, 0x1e376c085141ab531e376c085141ab53
        .octa 0x2748774cdf8eeb992748774cdf8eeb99, 0x2748774cdf8eeb992748774cdf8eeb99
        .octa 0x34b0bcb5e19b48a834b0bcb5e19b48a8, 0x34b0bcb5e19b48a834b0bcb5e19b48a8
        .octa 0x391c0cb3c5c95a63391c0cb3c5c95a63, 0x391c0cb3c5c95a63391c0cb3c5c95a63
        .octa 0x4ed8aa4ae3418acb4ed8aa4ae3418acb, 0x4ed8aa4ae3418acb4ed8aa4ae3418acb
        .octa 0x5b9cca4f7763e3735b9cca4f7763e373, 0x5b9cca4f7763e3735b9cca4f7763e373
        .octa 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3, 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3
        .octa 0x748f82ee5defb2fc748f82ee5defb2fc, 0x748f82ee5defb2fc748f82ee5defb2fc
        .octa 0x78a5636f43172f6078a5636f43172f60, 0x78a5636f43172f6078a5636f43172f60
        .octa 0x84c87814a1f0ab7284c87814a1f0ab72, 0x84c87814a1f0ab7284c87814a1f0ab72
        .octa 0x8cc702081a6439ec8cc702081a6439ec, 0x8cc702081a6439ec8cc702081a6439ec
        .octa 0x90befffa23631e2890befffa23631e28, 0x90befffa23631e2890befffa23631e28
        .octa 0xa4506cebde82bde9a4506cebde82bde9, 0xa4506cebde82bde9a4506cebde82bde9
        .octa 0xbef9a3f7b2c67915bef9a3f7b2c67915, 0xbef9a3f7b2c67915bef9a3f7b2c67915
        .octa 0xc67178f2e372532bc67178f2e372532b, 0xc67178f2e372532bc67178f2e372532b
        .octa 0xca273eceea26619cca273eceea26619c, 0xca273eceea26619cca273eceea26619c
        .octa 0xd186b8c721c0c207d186b8c721c0c207, 0xd186b8c721c0c207d186b8c721c0c207
        .octa 0xeada7dd6cde0eb1eeada7dd6cde0eb1e, 0xeada7dd6cde0eb1eeada7dd6cde0eb1e
        .octa 0xf57d4f7fee6ed178f57d4f7fee6ed178, 0xf57d4f7fee6ed178f57d4f7fee6ed178
        .octa 0x06f067aa72176fba06f067aa72176fba, 0x06f067aa72176fba06f067aa72176fba
        .octa 0x0a637dc5a2c898a60a637dc5a2c898a6, 0x0a637dc5a2c898a60a637dc5a2c898a6
        .octa 0x113f9804bef90dae113f9804bef90dae, 0x113f9804bef90dae113f9804bef90dae
        .octa 0x1b710b35131c471b1b710b35131c471b, 0x1b710b35131c471b1b710b35131c471b
        .octa 0x28db77f523047d8428db77f523047d84, 0x28db77f523047d8428db77f523047d84
        .octa 0x32caab7b40c7249332caab7b40c72493, 0x32caab7b40c7249332caab7b40c72493
        .octa 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc, 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc
        .octa 0x431d67c49c100d4c431d67c49c100d4c, 0x431d67c49c100d4c431d67c49c100d4c
        .octa 0x4cc5d4becb3e42b64cc5d4becb3e42b6, 0x4cc5d4becb3e42b64cc5d4becb3e42b6
        .octa 0x597f299cfc657e2a597f299cfc657e2a, 0x597f299cfc657e2a597f299cfc657e2a
        .octa 0x5fcb6fab3ad6faec5fcb6fab3ad6faec, 0x5fcb6fab3ad6faec5fcb6fab3ad6faec
        .octa 0x6c44198c4a4758176c44198c4a4758176c44198c4a475817, 0x6c44198c4a4758176c44198c4a475817

.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
.align 32
PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607
                         .octa 0x18191a1b1c1d1e1f1011121314151617