;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2016 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%include "sha512_job.asm"
%include "sha512_mb_mgr_datastruct.asm"

%include "reg_sizes.asm"

extern  sha512_mb_x2_avx
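; sha512_mb_x2_avx is the compute core: it runs the SHA-512 rounds for the two
; lanes described by the manager's args block in parallel.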

%ifidn __OUTPUT_FORMAT__, elf64
; Linux register definitions
%define arg1    rdi ; rcx
%define arg2    rsi ; rdx

; idx needs to be other than arg1, arg2, rbx, r12
%define idx             rdx ; rsi
%define last_len        rdx ; rsi

%define size_offset     rcx ; rdi
%define tmp2            rcx ; rdi

%else
; WINDOWS register definitions
%define arg1    rcx
%define arg2    rdx

; idx needs to be other than arg1, arg2, rbx, r12
%define last_len        rsi
%define idx             rsi

%define size_offset     rdi
%define tmp2            rdi

%endif

; Common definitions
%define state   arg1
%define job     arg2
%define len2    arg2
%define p2      arg2

%define p               r11
%define start_offset    r11

%define unused_lanes    rbx

%define job_rax         rax
%define len             rax

%define lane            rbp
%define tmp3            rbp
%define lens3           rbp

%define extra_blocks    r8
%define lens0           r8

%define tmp             r9
%define lens1           r9

%define lane_data       r10
%define lens2           r10

struc stack_frame
        .xmm: resb 16*10
        .gpr: resb 8*5
        .rsp: resb 8
endstruc

; STACK_SPACE needs to be an odd multiple of 8
%define _XMM_SAVE       stack_frame.gpr
%define _GPR_SAVE       stack_frame.rsp
%define STACK_SPACE     stack_frame_size
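; Frame layout: .xmm holds the ten XMM saves needed on win64, .gpr holds the
; callee-saved GPRs (rbx, rbp, r12, plus rsi/rdi on win64), and .rsp holds the
; caller's stack pointer.  Note that, despite its name, _XMM_SAVE is the offset
; of the GPR save area; the XMM registers themselves are stored at offset 0.
; rsp is also rounded down to a 32-byte boundary so the vmovdqa saves stay
; aligned.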

; SHA512_JOB* sha512_mb_mgr_submit_avx(SHA512_MB_JOB_MGR *state, SHA512_JOB *job)
; arg 1 : state (rdi on elf64, rcx on win64)
; arg 2 : job   (rsi on elf64, rdx on win64)
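;
; A hedged usage sketch from C, not part of this file; the function and field
; names are taken from the surrounding isa-l_crypto sources (sha512_mb.h and
; the sibling init/flush routines) and may differ slightly:
;
;       SHA512_MB_JOB_MGR mgr;
;       SHA512_JOB job, *done;
;       sha512_mb_mgr_init_avx(&mgr);
;       job.buffer = msg;                  /* data to hash */
;       job.len    = num_blocks;           /* job length */
;       done = sha512_mb_mgr_submit_avx(&mgr, &job);  /* NULL until a job completes */
;       while ((done = sha512_mb_mgr_flush_avx(&mgr)) != NULL)
;               ;                          /* drain any remaining jobs */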
global sha512_mb_mgr_submit_avx:function
sha512_mb_mgr_submit_avx:

        mov     rax, rsp

        sub     rsp, STACK_SPACE
        and     rsp, ~31

        mov     [rsp + stack_frame.rsp], rax

        mov     [rsp + _XMM_SAVE + 8*0], rbx
        mov     [rsp + _XMM_SAVE + 8*1], rbp
        mov     [rsp + _XMM_SAVE + 8*2], r12
%ifidn __OUTPUT_FORMAT__, win64
        mov     [rsp + _XMM_SAVE + 8*3], rsi
        mov     [rsp + _XMM_SAVE + 8*4], rdi
        vmovdqa [rsp + 16*0], xmm6
        vmovdqa [rsp + 16*1], xmm7
        vmovdqa [rsp + 16*2], xmm8
        vmovdqa [rsp + 16*3], xmm9
        vmovdqa [rsp + 16*4], xmm10
        vmovdqa [rsp + 16*5], xmm11
        vmovdqa [rsp + 16*6], xmm12
        vmovdqa [rsp + 16*7], xmm13
        vmovdqa [rsp + 16*8], xmm14
        vmovdqa [rsp + 16*9], xmm15
%endif

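        ; Pop a free lane id from the byte-packed unused_lanes stack (the low
        ; byte is the next free lane), attach the job to that lane, and record
        ; its length.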
        mov     unused_lanes, [state + _unused_lanes]
        movzx   lane, BYTE(unused_lanes)
        shr     unused_lanes, 8
        imul    lane_data, lane, _LANE_DATA_size
        mov     dword [job + _status], STS_BEING_PROCESSED
        lea     lane_data, [state + _ldata + lane_data]
        mov     [state + _unused_lanes], unused_lanes
        mov     DWORD(len), [job + _len]

        mov     [lane_data + _job_in_lane], job
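        ; Each _lens entry is a qword: the lane index sits in the low dword and
        ; the job length in the high dword, hence the store at offset +4 below.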
        mov     [state + _lens + 4 + 8*lane], DWORD(len)

        ; Load digest words from result_digest
        vmovdqa xmm0, [job + _result_digest + 0*16]
        vmovdqa xmm1, [job + _result_digest + 1*16]
        vmovdqa xmm2, [job + _result_digest + 2*16]
        vmovdqa xmm3, [job + _result_digest + 3*16]
        vmovq   [state + _args_digest + 8*lane + 0*32], xmm0
        vpextrq [state + _args_digest + 8*lane + 1*32], xmm0, 1
        vmovq   [state + _args_digest + 8*lane + 2*32], xmm1
        vpextrq [state + _args_digest + 8*lane + 3*32], xmm1, 1
        vmovq   [state + _args_digest + 8*lane + 4*32], xmm2
        vpextrq [state + _args_digest + 8*lane + 5*32], xmm2, 1
        vmovq   [state + _args_digest + 8*lane + 6*32], xmm3
        vpextrq [state + _args_digest + 8*lane + 7*32], xmm3, 1

        mov     p, [job + _buffer]
        mov     [state + _args_data_ptr + 8*lane], p

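        ; If a lane is still free (unused_lanes has not collapsed to its 0xFF
        ; terminator), only queue the job and return NULL; hashing starts once
        ; both lanes are occupied, or when the caller flushes the manager.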
        cmp     unused_lanes, 0xff
        jne     return_null

start_loop:

        ; Find min length
        mov     lens0, [state + _lens + 0*8]
        mov     idx, lens0
        mov     lens1, [state + _lens + 1*8]
        cmp     lens1, idx
        cmovb   idx, lens1

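        ; Split the smaller _lens entry: the low nibble is the lane index (idx),
        ; the upper 32 bits are the remaining length (len2).  A length of zero
        ; means that lane's job is already complete.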
        mov     len2, idx
        and     idx, 0xF
        and     len2, ~0xFF
        jz      len_is_0

        sub     lens0, len2
        sub     lens1, len2
        shr     len2, 32
        mov     [state + _lens + 0*8], lens0
        mov     [state + _lens + 1*8], lens1

        ; "state" and "args" are the same address, arg1
        ; len is arg2
        call    sha512_mb_x2_avx
        ; state and idx are intact

len_is_0:

        ; process completed job "idx"
        imul    lane_data, idx, _LANE_DATA_size
        lea     lane_data, [state + _ldata + lane_data]

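        ; Retire the completed job: detach it from its lane, mark it
        ; STS_COMPLETED, and push the lane index back onto the unused_lanes
        ; stack.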
        mov     job_rax, [lane_data + _job_in_lane]

        mov     unused_lanes, [state + _unused_lanes]
        mov     qword [lane_data + _job_in_lane], 0
        mov     dword [job_rax + _status], STS_COMPLETED
        shl     unused_lanes, 8
        or      unused_lanes, idx
        mov     [state + _unused_lanes], unused_lanes

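        ; Gather the finished digest from the manager's transposed _args_digest
        ; layout (word i of lane idx lives at offset 8*idx + i*32) and write it
        ; back to the job's result_digest.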
        vmovq   xmm0, [state + _args_digest + 8*idx + 0*32]
        vpinsrq xmm0, [state + _args_digest + 8*idx + 1*32], 1
        vmovq   xmm1, [state + _args_digest + 8*idx + 2*32]
        vpinsrq xmm1, [state + _args_digest + 8*idx + 3*32], 1
        vmovq   xmm2, [state + _args_digest + 8*idx + 4*32]
        vpinsrq xmm2, [state + _args_digest + 8*idx + 5*32], 1
        vmovq   xmm3, [state + _args_digest + 8*idx + 6*32]
        vpinsrq xmm3, [state + _args_digest + 8*idx + 7*32], 1

        vmovdqa [job_rax + _result_digest + 0*16], xmm0
        vmovdqa [job_rax + _result_digest + 1*16], xmm1
        vmovdqa [job_rax + _result_digest + 2*16], xmm2
        vmovdqa [job_rax + _result_digest + 3*16], xmm3

return:

%ifidn __OUTPUT_FORMAT__, win64
        vmovdqa xmm6,  [rsp + 16*0]
        vmovdqa xmm7,  [rsp + 16*1]
        vmovdqa xmm8,  [rsp + 16*2]
        vmovdqa xmm9,  [rsp + 16*3]
        vmovdqa xmm10, [rsp + 16*4]
        vmovdqa xmm11, [rsp + 16*5]
        vmovdqa xmm12, [rsp + 16*6]
        vmovdqa xmm13, [rsp + 16*7]
        vmovdqa xmm14, [rsp + 16*8]
        vmovdqa xmm15, [rsp + 16*9]
        mov     rsi, [rsp + _XMM_SAVE + 8*3]
        mov     rdi, [rsp + _XMM_SAVE + 8*4]
%endif
        mov     rbx, [rsp + _XMM_SAVE + 8*0]
        mov     rbp, [rsp + _XMM_SAVE + 8*1]
        mov     r12, [rsp + _XMM_SAVE + 8*2]
        mov     rsp, [rsp + stack_frame.rsp]

        ret

return_null:
        xor     job_rax, job_rax
        jmp     return

section .data align=16

align 16
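; Note: these 32-bit values are the SHA-256 initial digest constants; they do
; not appear to be referenced anywhere in this file.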
H0:     dd 0x6a09e667
H1:     dd 0xbb67ae85
H2:     dd 0x3c6ef372
H3:     dd 0xa54ff53a
H4:     dd 0x510e527f
H5:     dd 0x9b05688c
H6:     dd 0x1f83d9ab
H7:     dd 0x5be0cd19