1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
2 ; Copyright(c) 2011-2016 Intel Corporation All rights reserved.
4 ; Redistribution and use in source and binary forms, with or without
5 ; modification, are permitted provided that the following conditions
7 ; * Redistributions of source code must retain the above copyright
8 ; notice, this list of conditions and the following disclaimer.
9 ; * Redistributions in binary form must reproduce the above copyright
10 ; notice, this list of conditions and the following disclaimer in
11 ; the documentation and/or other materials provided with the
13 ; * Neither the name of Intel Corporation nor the names of its
14 ; contributors may be used to endorse or promote products derived
15 ; from this software without specific prior written permission.
17 ; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 ; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 ; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 ; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 ; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 ; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 ; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 ; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 ; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 ; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 ; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
30 %include "sha512_job.asm"
31 %include "sha512_mb_mgr_datastruct.asm"
33 %include "reg_sizes.asm"
35 extern sha512_mb_x4_avx2
; ---------------------------------------------------------------------
; Per-ABI register aliases for the submit routine below.
; NOTE(review): this excerpt is missing many lines (the %else/%endif of
; the ABI conditional and several %defines such as idx, lane, job, state,
; len, lens0..lens3, p, job_rax, lane_data) — the fused numeric prefixes
; on each line show the gaps. Do not edit these aliases without the full
; file; unseen code depends on them.
; ---------------------------------------------------------------------
37 %ifidn __OUTPUT_FORMAT__, elf64
38 ; LINUX register definitions
; System V AMD64: arg1/arg2 arrive in rdi/rsi (trailing comments note the
; Windows counterpart register for each alias).
39 %define arg1 rdi ; rcx
40 %define arg2 rsi ; rdx
42 ; idx needs to be other than arg1, arg2, rbx, r12
44 %define last_len rdx ; rsi
46 %define size_offset rcx ; rdi
47 %define tmp2 rcx ; rdi
50 ; WINDOWS register definitions
; NOTE(review): the %else introducing this Windows branch is not visible
; in this excerpt; size_offset is redefined here for the win64 side.
54 ; idx needs to be other than arg1, arg2, rbx, r12
58 %define size_offset rdi
; Aliases common to both ABIs (surrounding defines elided).
70 %define start_offset r11
; unused_lanes caches state->_unused_lanes; its low byte is consumed as
; the next free lane index below — TODO confirm full encoding in header.
72 %define unused_lanes rbx
81 %define extra_blocks r8
96 ; STACK_SPACE needs to be an odd multiple of 8
; Stack-frame layout names come from sha512_mb_mgr_datastruct.asm
; (included above); _XMM_SAVE despite its name addresses the GPR save
; area of the frame — see the mov/vmovdqa pairs in the prologue.
97 %define _XMM_SAVE stack_frame.gpr
98 %define _GPR_SAVE stack_frame.rsp
99 %define STACK_SPACE stack_frame_size
; ---------------------------------------------------------------------
; SHA512_JOB* sha512_mb_mgr_submit_avx2(SHA512_MB_JOB_MGR *state,
;                                       SHA512_JOB *job)
; Submits one job to the 4-lane AVX2 multi-buffer SHA-512 manager.
; Takes a free lane from state->_unused_lanes, copies the job's running
; digest and buffer pointer into the lane's slots, and — when all four
; lanes are occupied — runs sha512_mb_x4_avx2 and returns the job that
; finished (pointer left in job_rax, presumably rax; the ret and the
; not-full early-out path are elided from this excerpt).
;
; NOTE(review): many lines are missing here (frame allocation, the
; min-length selection between the lens loads and stores, all labels and
; jumps, the final ret). The fused numeric prefixes mark the gaps —
; treat every comment below as applying only to the visible lines.
; ---------------------------------------------------------------------
101 ; SHA512_JOB* sha512_mb_mgr_submit_avx2(SHA512_MB_JOB_MGR *state, SHA512_JOB *job)
102 ; arg 1 : rcx : state
104 mk_global sha512_mb_mgr_submit_avx2, function
105 sha512_mb_mgr_submit_avx2:
; --- prologue: spill callee-saved state -------------------------------
; rax is stored as the caller's rsp; the instructions that captured rsp
; into rax and adjusted rsp are not visible here — TODO confirm against
; the full file before touching the frame code.
113 mov [rsp + stack_frame.rsp], rax
; Callee-saved GPRs under both ABIs (rbx, rbp, r12).
115 mov [rsp + _XMM_SAVE + 8*0], rbx
116 mov [rsp + _XMM_SAVE + 8*1], rbp
117 mov [rsp + _XMM_SAVE + 8*2], r12
118 %ifidn __OUTPUT_FORMAT__, win64
; Win64 additionally treats rsi, rdi and xmm6..xmm15 as callee-saved.
119 mov [rsp + _XMM_SAVE + 8*3], rsi
120 mov [rsp + _XMM_SAVE + 8*4], rdi
121 vmovdqa [rsp + 16*0], xmm6
122 vmovdqa [rsp + 16*1], xmm7
123 vmovdqa [rsp + 16*2], xmm8
124 vmovdqa [rsp + 16*3], xmm9
125 vmovdqa [rsp + 16*4], xmm10
126 vmovdqa [rsp + 16*5], xmm11
127 vmovdqa [rsp + 16*6], xmm12
128 vmovdqa [rsp + 16*7], xmm13
129 vmovdqa [rsp + 16*8], xmm14
130 vmovdqa [rsp + 16*9], xmm15
; --- claim a free lane ------------------------------------------------
; Low byte of the free-lane list selects the lane; the shift that pops
; it off unused_lanes is among the elided lines — TODO confirm.
133 mov unused_lanes, [state + _unused_lanes]
134 movzx lane, BYTE(unused_lanes)
135 imul lane_data, lane, _LANE_DATA_size
137 mov dword [job + _status], STS_BEING_PROCESSED
138 lea lane_data, [state + _ldata + lane_data]
139 mov [state + _unused_lanes], unused_lanes
140 mov DWORD(len), [job + _len]
142 mov [lane_data + _job_in_lane], job
; len is stored into the upper dword of this lane's _lens entry
; (offset +4); presumably the lower dword holds the lane id — verify
; against sha512_mb_mgr_datastruct.asm.
143 mov [state + _lens + 4 + 8*lane], DWORD(len)
146 ; Load digest words from result_digest
; Copy the job's 64-byte running digest into the transposed per-lane
; layout: consecutive digest qwords sit 32 bytes apart (one slot per
; lane, 4 lanes x 8 bytes) in _args_digest.
147 vmovdqa xmm0, [job + _result_digest + 0*16]
148 vmovdqa xmm1, [job + _result_digest + 1*16]
149 vmovdqa xmm2, [job + _result_digest + 2*16]
150 vmovdqa xmm3, [job + _result_digest + 3*16]
151 vmovq [state + _args_digest + 8*lane + 0*32], xmm0
152 vpextrq [state + _args_digest + 8*lane + 1*32], xmm0, 1
153 vmovq [state + _args_digest + 8*lane + 2*32], xmm1
154 vpextrq [state + _args_digest + 8*lane + 3*32], xmm1, 1
155 vmovq [state + _args_digest + 8*lane + 4*32], xmm2
156 vpextrq [state + _args_digest + 8*lane + 5*32], xmm2, 1
157 vmovq [state + _args_digest + 8*lane + 6*32], xmm3
158 vpextrq [state + _args_digest + 8*lane + 7*32], xmm3, 1
; Point the lane at the job's input buffer and count it as in use.
160 mov p, [job + _buffer]
161 mov [state + _args_data_ptr + 8*lane], p
163 add dword [state + _num_lanes_inuse], 1
; 0xff = free-list-empty sentinel: all four lanes occupied. The
; conditional jump taken when lanes remain free is elided here.
164 cmp unused_lanes, 0xff
; --- all lanes full: pick shortest job and run the x4 core ------------
; The min-length selection between these loads and the stores below is
; not visible in this excerpt — presumably lens0..lens3 are compared and
; idx receives the winning lane; confirm in the full file.
170 mov lens0, [state + _lens + 0*8]
172 mov lens1, [state + _lens + 1*8]
175 mov lens2, [state + _lens + 2*8]
178 mov lens3, [state + _lens + 3*8]
191 mov [state + _lens + 0*8], lens0
192 mov [state + _lens + 1*8], lens1
193 mov [state + _lens + 2*8], lens2
194 mov [state + _lens + 3*8], lens3
196 ; "state" and "args" are the same address, arg1
198 call sha512_mb_x4_avx2
199 ; state and idx are intact
; --- harvest the lane that completed ----------------------------------
203 ; process completed job "idx"
204 imul lane_data, idx, _LANE_DATA_size
205 lea lane_data, [state + _ldata + lane_data]
207 mov job_rax, [lane_data + _job_in_lane]
; Return lane idx to the free list and mark the job done; the shift/or
; that pushes idx into unused_lanes is among the elided lines.
210 mov unused_lanes, [state + _unused_lanes]
211 mov qword [lane_data + _job_in_lane], 0
212 mov dword [job_rax + _status], STS_COMPLETED
215 mov [state + _unused_lanes], unused_lanes
217 sub dword [state + _num_lanes_inuse], 1
; Gather the finished lane's digest back out of the transposed layout
; (inverse of the scatter above) into the job's result_digest.
219 vmovq xmm0, [state + _args_digest + 8*idx + 0*32]
220 vpinsrq xmm0, [state + _args_digest + 8*idx + 1*32], 1
221 vmovq xmm1, [state + _args_digest + 8*idx + 2*32]
222 vpinsrq xmm1, [state + _args_digest + 8*idx + 3*32], 1
223 vmovq xmm2, [state + _args_digest + 8*idx + 4*32]
224 vpinsrq xmm2, [state + _args_digest + 8*idx + 5*32], 1
225 vmovq xmm3, [state + _args_digest + 8*idx + 6*32]
226 vpinsrq xmm3, [state + _args_digest + 8*idx + 7*32], 1
227 vmovdqa [job_rax + _result_digest + 0*16], xmm0
228 vmovdqa [job_rax + _result_digest + 1*16], xmm1
229 vmovdqa [job_rax + _result_digest + 2*16], xmm2
230 vmovdqa [job_rax + _result_digest + 3*16], xmm3
; --- epilogue: restore and return -------------------------------------
234 %ifidn __OUTPUT_FORMAT__, win64
235 vmovdqa xmm6, [rsp + 16*0]
236 vmovdqa xmm7, [rsp + 16*1]
237 vmovdqa xmm8, [rsp + 16*2]
238 vmovdqa xmm9, [rsp + 16*3]
239 vmovdqa xmm10, [rsp + 16*4]
240 vmovdqa xmm11, [rsp + 16*5]
241 vmovdqa xmm12, [rsp + 16*6]
242 vmovdqa xmm13, [rsp + 16*7]
243 vmovdqa xmm14, [rsp + 16*8]
244 vmovdqa xmm15, [rsp + 16*9]
245 mov rsi, [rsp + _XMM_SAVE + 8*3]
246 mov rdi, [rsp + _XMM_SAVE + 8*4]
248 mov rbx, [rsp + _XMM_SAVE + 8*0]
249 mov rbp, [rsp + _XMM_SAVE + 8*1]
250 mov r12, [rsp + _XMM_SAVE + 8*2]
; Restore caller's rsp last; the %endif for the win64 branch and the
; final ret (and any vzeroupper) are elided from this excerpt.
251 mov rsp, [rsp + stack_frame.rsp]
259 section .data align=16