1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
2 ; Copyright(c) 2011-2016 Intel Corporation All rights reserved.
4 ; Redistribution and use in source and binary forms, with or without
5 ; modification, are permitted provided that the following conditions
7 ; * Redistributions of source code must retain the above copyright
8 ; notice, this list of conditions and the following disclaimer.
9 ; * Redistributions in binary form must reproduce the above copyright
10 ; notice, this list of conditions and the following disclaimer in
11 ; the documentation and/or other materials provided with the
13 ; * Neither the name of Intel Corporation nor the names of its
14 ; contributors may be used to endorse or promote products derived
15 ; from this software without specific prior written permission.
17 ; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 ; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 ; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 ; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 ; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 ; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 ; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 ; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 ; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 ; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 ; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
30 %include "sha1_mb_mgr_datastruct.asm"
31 %include "reg_sizes.asm"
35 ;; code to compute quad SHA1 using AVX
36 ;; derived from ...\sha1_multiple\sha1_quad4.asm
37 ;; variation of sha1_mult2.asm : clobbers all xmm regs, rcx left intact
39 ; transpose r0, r1, r2, r3, t0, t1
40 ; "transpose" data in {r0..r3} using temps {t0, t1}
41 ; Input looks like: {r0 r1 r2 r3}
; Each input register holds the same-numbered dword of one message:
;   r0 = {a3 a2 a1 a0}, r1 = {b3 b2 b1 b0}, r2 = {c3 c2 c1 c0}, r3 = {d3 d2 d1 d0}
; After transposing, each register holds word i of ALL four messages,
; which is the lane-parallel layout the 4-wide SHA1 rounds consume.
; Note the result rows come back in registers {t0 r1 r0 r3}, not {r0..r3}.
; NOTE(review): the %macro/%endmacro lines of TRANSPOSE fall outside this
; visible chunk; only the shuffle body appears below.
47 ; output looks like: {t0 r1 r0 r3}
60 vshufps %%t0, %%r0, %%r1, 0x44 ; t0 = {b1 b0 a1 a0}
61 vshufps %%r0, %%r0, %%r1, 0xEE ; r0 = {b3 b2 a3 a2}
63 vshufps %%t1, %%r2, %%r3, 0x44 ; t1 = {d1 d0 c1 c0}
64 vshufps %%r2, %%r2, %%r3, 0xEE ; r2 = {d3 d2 c3 c2}
66 vshufps %%r1, %%t0, %%t1, 0xDD ; r1 = {d1 c1 b1 a1}
68 vshufps %%r3, %%r0, %%r2, 0xDD ; r3 = {d3 c3 b3 a3}
70 vshufps %%r0, %%r0, %%r2, 0x88 ; r0 = {d2 c2 b2 a2}
71 vshufps %%t0, %%t0, %%t1, 0x88 ; t0 = {d0 c0 b0 a0}
74 ;; Magic functions defined in FIPS 180-1
76 ; macro MAGIC_F0 F,B,C,D,T ;; F = (D ^ (B & (C ^ D)))
; Ch(B,C,D) for rounds 0..19, rewritten as D^(B&(C^D)) so no NOT/ANDN
; instruction is needed (3 ops, no temp register used).
83 vpxor %%regF, %%regC,%%regD ; F = C ^ D
84 vpand %%regF, %%regF,%%regB ; F = B & (C ^ D)
85 vpxor %%regF, %%regF,%%regD ; F = D ^ (B & (C ^ D)) == Ch(B,C,D)
88 ; macro MAGIC_F1 F,B,C,D,T ;; F = (B ^ C ^ D)
; Parity(B,C,D), the round function for rounds 20..39.
95 vpxor %%regF,%%regD,%%regC ; F = D ^ C
96 vpxor %%regF,%%regF,%%regB ; F = B ^ C ^ D
99 ; macro MAGIC_F2 F,B,C,D,T ;; F = ((B & C) | (B & D) | (C & D))
; Maj(B,C,D) for rounds 40..59, folded to the equivalent
; ((B | C) & D) | (B & C) -- 4 ops with one temp instead of 5.
106 vpor %%regF,%%regB,%%regC ; F = B | C
107 vpand %%regT,%%regB,%%regC ; T = B & C
108 vpand %%regF,%%regF,%%regD ; F = (B | C) & D
109 vpor %%regF,%%regF,%%regT ; F = ((B|C)&D) | (B&C) == Maj(B,C,D)
112 ; macro MAGIC_F3 F,B,C,D,T ;; F = (B ^ C ^ D)
; Rounds 60..79 reuse Parity; identical to MAGIC_F1 by definition.
119 MAGIC_F1 %%regF,%%regB,%%regC,%%regD,%%regT
122 ; PROLD reg, imm, tmp
; In-place packed rotate-left: each dword of 'reg' rotated by constant
; 'imm', clobbering 'tmp'. AVX1 has no vprold, so emulate with two
; shifts and an OR.
127 vpsrld %%tmp, %%reg, (32-(%%imm)) ; tmp = reg >> (32-imm)
128 vpslld %%reg, %%reg, %%imm ; reg = reg << imm
129 vpor %%reg, %%reg, %%tmp ; reg = ROL32(reg, imm)
133 ; PROLD_nd reg, imm, tmp, src
; Non-destructive packed rotate-left: reg = ROL32(src, imm), each dword
; rotated by constant 'imm'; 'src' is left unmodified, 'tmp' clobbered.
139 vpsrld %%tmp, %%src, (32-(%%imm)) ; tmp = src >> (32-imm)
140 vpslld %%reg, %%src, %%imm ; reg = src << imm
141 vpor %%reg, %%reg, %%tmp ; reg = ROL32(src, imm)
; One 4-lane SHA1 round for t = 0..15:
;   E += K + W[t] + ROL5(A) + F(B,C,D);  B = ROL30(B)
; W[t] is read directly from the 16-entry schedule on the stack
; (one 16-byte slot per word: 4 lanes x 4 bytes).
; NOTE(review): the %define lines naming this macro's 10 parameters and
; the closing %endmacro are elided from this chunk.
144 %macro SHA1_STEP_00_15 10
155 vpaddd %%regE, %%regE,%%immCNT ; E += K (round constant)
156 vpaddd %%regE, %%regE,[rsp + (%%memW * 16)] ; E += W[t]
157 PROLD_nd %%regT,5, %%regF,%%regA ; T = ROL5(A)
158 vpaddd %%regE, %%regE,%%regT ; E += ROL5(A)
159 %%MAGIC %%regF,%%regB,%%regC,%%regD,%%regT ;; FUN = MAGIC_Fi(B,C,D)
160 PROLD %%regB,30, %%regT ; B = ROL30(B)
161 vpaddd %%regE, %%regE,%%regF ; E += F(B,C,D)
; One 4-lane SHA1 round for t = 16..79 with on-the-fly message schedule:
;   W[t] = ROL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16])   (FIPS 180-1)
;   E += K + W[t] + ROL5(A) + F(B,C,D);  B = ROL30(B)
; The schedule lives in a 16-slot circular buffer on the stack; the
; "& 15" masks implement the mod-16 indexing, and the new W[t]
; overwrites the W[t-16] slot.
; NOTE(review): a few interior lines of this macro are elided from this
; chunk (apparently the xor of W16 with W14 and the vpslld half of the
; ROL1) -- confirm against the full file.
164 %macro SHA1_STEP_16_79 10
175 vpaddd %%regE, %%regE,%%immCNT ; E += K (round constant)
177 vmovdqa W14, [rsp + ((%%memW - 14) & 15) * 16] ; W14 = W[t-14]
179 vpxor W16, W16, [rsp + ((%%memW - 8) & 15) * 16] ; ^= W[t-8]
180 vpxor W16, W16, [rsp + ((%%memW - 3) & 15) * 16] ; ^= W[t-3]
182 vpsrld %%regF, W16, (32-1) ; high bit for ROL1
184 vpor %%regF, %%regF, W16 ; F = ROL1(xor-chain) = W[t]
187 vmovdqa [rsp + ((%%memW - 0) & 15) * 16],%%regF ; store W[t] over W[t-16] slot
188 vpaddd %%regE, %%regE,%%regF ; E += W[t]
190 PROLD_nd %%regT,5, %%regF, %%regA ; T = ROL5(A)
191 vpaddd %%regE, %%regE,%%regT ; E += ROL5(A)
192 %%MAGIC %%regF,%%regB,%%regC,%%regD,%%regT ;; FUN = MAGIC_Fi(B,C,D)
193 PROLD %%regB,30, %%regT ; B = ROL30(B)
194 vpaddd %%regE,%%regE,%%regF ; E += F(B,C,D)
197 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
198 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
199 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
201 ;; FRAMESZ plus pushes must be an odd multiple of 8
202 %define XMM_SAVE ((15-15)*16 + 1*8) ; no xmm spill slots (15-15 = 0); the extra 8 keeps FRAMESZ an odd multiple of 8
203 %define FRAMESZ 16*16 + XMM_SAVE ; 16 schedule slots x 16 bytes, plus alignment pad = 264
204 %define _XMM FRAMESZ - XMM_SAVE ; offset of the (currently empty) xmm save area
206 %define VMOVPS vmovups ; unaligned form -- presumably the input data pointers carry no alignment guarantee; confirm against callers
260 %define DIGEST_SIZE (4*5*4) ; 5 digest words x 4 lanes x 4 bytes each
263 %ifidn __OUTPUT_FORMAT__, elf64
274 ; void sha1_mb_x4_avx(SHA1_MB_ARGS_X8 *args, uint32_t size_in_blocks);
275 ; arg 1 : ARG1 : pointer to args (only 4 of the 8 lanes used)
276 ; arg 2 : ARG2 : size (in blocks) ;; assumed to be >= 1
278 ; Clobbers registers: ARG2, rax, r8-r11, xmm0-xmm15
; NOTE(review): this chunk shows only part of the function -- the
; matching %endif/%else of the ABI selection, the label definitions, the
; block/lane loops, pointer increments, and the final ret are elided.
280 global sha1_mb_x4_avx:function internal
283 sub rsp, FRAMESZ ;; FRAMESZ + pushes must be odd multiple of 8
285 ;; Initialize digests
; Digests are stored transposed in the args struct: each 16-byte load
; picks up one digest word (a..e) for all 4 lanes at once, so the
; rounds can run 4-wide with no further shuffling.
286 vmovdqa A, [ARG1 + 0*16]
287 vmovdqa B, [ARG1 + 1*16]
288 vmovdqa C, [ARG1 + 2*16]
289 vmovdqa D, [ARG1 + 3*16]
290 vmovdqa E, [ARG1 + 4*16]
292 ;; load input pointers
293 mov inp0,[ARG1 + _data_ptr + 0*8]
294 mov inp1,[ARG1 + _data_ptr + 1*8]
295 mov inp2,[ARG1 + _data_ptr + 2*8]
296 mov inp3,[ARG1 + _data_ptr + 3*8]
; Byte-swap mask: SHA1 message words are big-endian, input is little-endian.
300 vmovdqa F, [PSHUFFLE_BYTE_FLIP_MASK]
; Interleave the 4 messages' words into lane-parallel form and park the
; first 16 schedule entries in the stack ring buffer (4 words per pass).
307 TRANSPOSE T2, T1, T4, T3, T0, T5
309 vmovdqa [rsp+(I*4+0)*16],T0
311 vmovdqa [rsp+(I*4+1)*16],T1
313 vmovdqa [rsp+(I*4+2)*16],T2
315 vmovdqa [rsp+(I*4+3)*16],T3
328 ;; perform 0-79 steps
; Rounds 0-15 consume the preloaded schedule with F0/K00_19.
334 SHA1_STEP_00_15 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
; Prime W16/W15 for the rolling schedule before rounds 16+.
340 vmovdqa W16, [rsp + ((16 - 16) & 15) * 16]
341 vmovdqa W15, [rsp + ((16 - 15) & 15) * 16]
; Rounds 16-19 / 20-39 / 40-59 / 60-79 with F0/F1/F2/F3 respectively
; (the loop constructs repeating each step and the K reloads are elided).
343 SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
351 SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F1
359 SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F2
367 SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F3
; Write the updated (transposed) digests back to the args struct.
; NOTE(review): the addition of the previous digest values (the SHA1
; feed-forward) must occur on elided lines above these stores -- verify
; against the full file.
382 vmovdqa [ARG1 + 0*16], A
383 vmovdqa [ARG1 + 1*16], B
384 vmovdqa [ARG1 + 2*16], C
385 vmovdqa [ARG1 + 3*16], D
386 vmovdqa [ARG1 + 4*16], E
388 ; update input pointers
; Pointers advance past the processed blocks so the caller can resume.
390 mov [ARG1 + _data_ptr + 0*8], inp0
392 mov [ARG1 + _data_ptr + 1*8], inp1
394 mov [ARG1 + _data_ptr + 2*8], inp2
396 mov [ARG1 + _data_ptr + 3*8], inp3
; Read-only constants. NOTE(review): these could live in .rodata on ELF
; targets; kept in .data as-is since this file also builds for other
; output formats.
406 section .data align=16
; vpshufb control: reverses the 4 bytes within each dword
; (little-endian load -> big-endian SHA1 message word).
409 PSHUFFLE_BYTE_FLIP_MASK: dq 0x0405060700010203, 0x0c0d0e0f08090a0b
; SHA1 round constants (FIPS 180-1), each replicated into all 4 lanes.
410 K00_19: dq 0x5A8279995A827999, 0x5A8279995A827999
411 K20_39: dq 0x6ED9EBA16ED9EBA1, 0x6ED9EBA16ED9EBA1
412 K40_59: dq 0x8F1BBCDC8F1BBCDC, 0x8F1BBCDC8F1BBCDC
413 K60_79: dq 0xCA62C1D6CA62C1D6, 0xCA62C1D6CA62C1D6