;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2016 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%include "sha256_job.asm"
%include "memcpy.asm"
%include "sha256_mb_mgr_datastruct.asm"

%include "reg_sizes.asm"

extern sha256_mb_x8_avx2

[bits 64]
default rel
section .text

%ifidn __OUTPUT_FORMAT__, elf64
; Linux register definitions
%define arg1    rdi ; rcx
%define arg2    rsi ; rdx

%define size_offset     rcx ; rdi
%define tmp2            rcx ; rdi

%else
; WINDOWS register definitions
%define arg1    rcx
%define arg2    rdx

%define size_offset     rdi
%define tmp2            rdi

%endif

; Common definitions
%define state   arg1
%define job     arg2
%define len2    arg2
%define p2      arg2

%define idx             r8
%define last_len        r8
%define p               r11
%define start_offset    r11

%define unused_lanes    rbx

%define job_rax         rax
%define len             rax

%define lane            rbp
%define tmp3            rbp

%define tmp             r9

%define lane_data       r10


; STACK_SPACE needs to be an odd multiple of 8
%define STACK_SPACE     8*8 + 16*10 + 8
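; Why an odd multiple of 8: on entry rsp is 8 mod 16 (return address), so
; subtracting an odd multiple of 8 leaves rsp 16-byte aligned, which the
; vmovdqa saves of the xmm area below require.  Layout: 8 qwords for
; callee-saved GP registers, then ten 16-byte xmm slots (used on win64 only).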

; SHA256_JOB* sha256_mb_mgr_submit_avx2(SHA256_MB_JOB_MGR *state, SHA256_JOB *job)
; arg 1 : rcx (win64) / rdi (elf64) : state
; arg 2 : rdx (win64) / rsi (elf64) : job
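; Returns the completed job, or NULL when no job has completed yet (the
; submitted job is only queued until all eight lanes are filled).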
mk_global sha256_mb_mgr_submit_avx2, function
sha256_mb_mgr_submit_avx2:
        endbranch

        sub     rsp, STACK_SPACE
        mov     [rsp + 8*0], rbx
        mov     [rsp + 8*3], rbp
        mov     [rsp + 8*4], r12
        mov     [rsp + 8*5], r13
        mov     [rsp + 8*6], r14
        mov     [rsp + 8*7], r15
%ifidn __OUTPUT_FORMAT__, win64
        mov     [rsp + 8*1], rsi
        mov     [rsp + 8*2], rdi
        vmovdqa [rsp + 8*8 + 16*0], xmm6
        vmovdqa [rsp + 8*8 + 16*1], xmm7
        vmovdqa [rsp + 8*8 + 16*2], xmm8
        vmovdqa [rsp + 8*8 + 16*3], xmm9
        vmovdqa [rsp + 8*8 + 16*4], xmm10
        vmovdqa [rsp + 8*8 + 16*5], xmm11
        vmovdqa [rsp + 8*8 + 16*6], xmm12
        vmovdqa [rsp + 8*8 + 16*7], xmm13
        vmovdqa [rsp + 8*8 + 16*8], xmm14
        vmovdqa [rsp + 8*8 + 16*9], xmm15
%endif
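        ; _unused_lanes is a stack of free lane ids packed 4 bits apiece;
        ; pop the low nibble to pick the lane this job will occupy.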
        mov     unused_lanes, [state + _unused_lanes]
        mov     lane, unused_lanes
        and     lane, 0xF
        shr     unused_lanes, 4
        imul    lane_data, lane, _LANE_DATA_size
        mov     dword [job + _status], STS_BEING_PROCESSED
        lea     lane_data, [state + _ldata + lane_data]
        mov     [state + _unused_lanes], unused_lanes
        mov     DWORD(len), [job + _len]

        shl     len, 4
        or      len, lane
        mov     [state + _lens + 4*lane], DWORD(len)
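        ; Each _lens entry packs (len << 4) | lane: block count in the upper
        ; 28 bits, lane id in the low nibble, so the min-length search below
        ; yields the shortest lane's index along with its length.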

        mov     [lane_data + _job_in_lane], job

        ; Load digest words from result_digest
        vmovdqu xmm0, [job + _result_digest + 0*16]
        vmovdqu xmm1, [job + _result_digest + 1*16]
        vmovd   [state + _args_digest + 4*lane + 0*4*8], xmm0
        vpextrd [state + _args_digest + 4*lane + 1*4*8], xmm0, 1
        vpextrd [state + _args_digest + 4*lane + 2*4*8], xmm0, 2
        vpextrd [state + _args_digest + 4*lane + 3*4*8], xmm0, 3
        vmovd   [state + _args_digest + 4*lane + 4*4*8], xmm1
        vpextrd [state + _args_digest + 4*lane + 5*4*8], xmm1, 1
        vpextrd [state + _args_digest + 4*lane + 6*4*8], xmm1, 2
        vpextrd [state + _args_digest + 4*lane + 7*4*8], xmm1, 3
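        ; _args_digest is stored transposed (SoA): word i of all eight lanes
        ; is contiguous, so one lane's consecutive digest words sit
        ; 8 lanes * 4 bytes = 32 bytes apart (the 4*8 stride above).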


        mov     p, [job + _buffer]
        mov     [state + _args_data_ptr + 8*lane], p

        add     dword [state + _num_lanes_inuse], 1
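        ; Hashing starts only once all eight lanes are busy; until then only
        ; the 0xF terminator remains in unused_lanes and we return NULL.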
        cmp     unused_lanes, 0xf
        jne     return_null

start_loop:
        ; Find min length
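        ; Horizontal unsigned-min reduction over the eight packed {len:lane}
        ; dwords in _lens; the winning dword carries the shortest lane's id
        ; in its low nibble.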
        vmovdqa xmm0, [state + _lens + 0*16]
        vmovdqa xmm1, [state + _lens + 1*16]

        vpminud xmm2, xmm0, xmm1        ; xmm2 has {D,C,B,A}
        vpalignr xmm3, xmm3, xmm2, 8    ; xmm3 has {x,x,D,C}
        vpminud xmm2, xmm2, xmm3        ; xmm2 has {x,x,E,F}
        vpalignr xmm3, xmm3, xmm2, 4    ; xmm3 has {x,x,x,E}
        vpminud xmm2, xmm2, xmm3        ; xmm2 has min value in low dword

        vmovd   DWORD(idx), xmm2
        mov     len2, idx
        and     idx, 0xF
        shr     len2, 4
        jz      len_is_0

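        ; Min length is non-zero: mask off its lane-id nibble, broadcast it
        ; to all eight dwords, and subtract it from every lane so the stored
        ; lengths become the blocks remaining after this round of hashing.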
        vpand   xmm2, xmm2, [rel clear_low_nibble]
        vpshufd xmm2, xmm2, 0

        vpsubd  xmm0, xmm0, xmm2
        vpsubd  xmm1, xmm1, xmm2

        vmovdqa [state + _lens + 0*16], xmm0
        vmovdqa [state + _lens + 1*16], xmm1


        ; "state" and "args" are the same address, arg1
        ; len is arg2
        call    sha256_mb_x8_avx2

        ; state and idx are intact

len_is_0:
        ; process completed job "idx"
        imul    lane_data, idx, _LANE_DATA_size
        lea     lane_data, [state + _ldata + lane_data]

        mov     job_rax, [lane_data + _job_in_lane]
        mov     unused_lanes, [state + _unused_lanes]
        mov     qword [lane_data + _job_in_lane], 0
        mov     dword [job_rax + _status], STS_COMPLETED
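        ; Push the finished lane's id back onto the stack of free lanes.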
        shl     unused_lanes, 4
        or      unused_lanes, idx
        mov     [state + _unused_lanes], unused_lanes

        sub     dword [state + _num_lanes_inuse], 1

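        ; Gather lane "idx" digest words from the transposed _args_digest
        ; back into the job's contiguous result_digest.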
        vmovd   xmm0, [state + _args_digest + 4*idx + 0*4*8]
        vpinsrd xmm0, [state + _args_digest + 4*idx + 1*4*8], 1
        vpinsrd xmm0, [state + _args_digest + 4*idx + 2*4*8], 2
        vpinsrd xmm0, [state + _args_digest + 4*idx + 3*4*8], 3
        vmovd   xmm1, [state + _args_digest + 4*idx + 4*4*8]
        vpinsrd xmm1, [state + _args_digest + 4*idx + 5*4*8], 1
        vpinsrd xmm1, [state + _args_digest + 4*idx + 6*4*8], 2
        vpinsrd xmm1, [state + _args_digest + 4*idx + 7*4*8], 3

        vmovdqa [job_rax + _result_digest + 0*16], xmm0
        vmovdqa [job_rax + _result_digest + 1*16], xmm1

return:

%ifidn __OUTPUT_FORMAT__, win64
        vmovdqa xmm6, [rsp + 8*8 + 16*0]
        vmovdqa xmm7, [rsp + 8*8 + 16*1]
        vmovdqa xmm8, [rsp + 8*8 + 16*2]
        vmovdqa xmm9, [rsp + 8*8 + 16*3]
        vmovdqa xmm10, [rsp + 8*8 + 16*4]
        vmovdqa xmm11, [rsp + 8*8 + 16*5]
        vmovdqa xmm12, [rsp + 8*8 + 16*6]
        vmovdqa xmm13, [rsp + 8*8 + 16*7]
        vmovdqa xmm14, [rsp + 8*8 + 16*8]
        vmovdqa xmm15, [rsp + 8*8 + 16*9]
        mov     rsi, [rsp + 8*1]
        mov     rdi, [rsp + 8*2]
%endif
        mov     rbx, [rsp + 8*0]
        mov     rbp, [rsp + 8*3]
        mov     r12, [rsp + 8*4]
        mov     r13, [rsp + 8*5]
        mov     r14, [rsp + 8*6]
        mov     r15, [rsp + 8*7]
        add     rsp, STACK_SPACE

        ret

return_null:
        xor     job_rax, job_rax
        jmp     return

section .data align=16

align 16
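; AND mask that clears the lane-id nibble of the packed {len:lane} minimum
; (low dword only; the result is then broadcast with vpshufd above).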
clear_low_nibble:
        dq 0x00000000FFFFFFF0, 0x0000000000000000