;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2016 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%include "sha256_job.asm"
%include "memcpy.asm"
%include "sha256_mb_mgr_datastruct.asm"
%include "reg_sizes.asm"

%ifdef HAVE_AS_KNOWS_AVX512

extern sha256_mb_x16_avx512

[bits 64]
default rel
section .text

%ifidn __OUTPUT_FORMAT__, elf64
; Linux register definitions
%define arg1            rdi ; rcx
%define arg2            rsi ; rdx

%define size_offset     rcx ; rdi
%define tmp2            rcx ; rdi

%else
; WINDOWS register definitions
%define arg1            rcx
%define arg2            rdx

%define size_offset     rdi
%define tmp2            rdi

%endif

; Common definitions
%define state           arg1
%define job             arg2
%define len2            arg2
%define p2              arg2

%define idx             r8
%define last_len        r8
%define p               r11
%define start_offset    r11
%define num_lanes_inuse r11

%define unused_lanes    rbx

%define job_rax         rax
%define len             rax

%define lane            rbp
%define tmp3            rbp

%define tmp             r9

%define lane_data       r10


; STACK_SPACE needs to be an odd multiple of 8
%define STACK_SPACE     8*8 + 16*10 + 8
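; (An odd multiple of 8: the call into this routine pushes an 8-byte return
;  address, so this adjustment leaves rsp 16-byte aligned for the aligned
;  vmovdqa saves/restores of xmm6-xmm15 below.)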

; SHA256_JOB* sha256_mb_mgr_submit_avx512(SHA256_MB_JOB_MGR *state, SHA256_JOB *job)
; arg 1 : rcx : state    (rdi on elf64)
; arg 2 : rdx : job      (rsi on elf64)
mk_global sha256_mb_mgr_submit_avx512, function
sha256_mb_mgr_submit_avx512:
        endbranch

        sub     rsp, STACK_SPACE
        mov     [rsp + 8*0], rbx
        mov     [rsp + 8*3], rbp
        mov     [rsp + 8*4], r12
        mov     [rsp + 8*5], r13
        mov     [rsp + 8*6], r14
        mov     [rsp + 8*7], r15
%ifidn __OUTPUT_FORMAT__, win64
        mov     [rsp + 8*1], rsi
        mov     [rsp + 8*2], rdi
        vmovdqa [rsp + 8*8 + 16*0], xmm6
        vmovdqa [rsp + 8*8 + 16*1], xmm7
        vmovdqa [rsp + 8*8 + 16*2], xmm8
        vmovdqa [rsp + 8*8 + 16*3], xmm9
        vmovdqa [rsp + 8*8 + 16*4], xmm10
        vmovdqa [rsp + 8*8 + 16*5], xmm11
        vmovdqa [rsp + 8*8 + 16*6], xmm12
        vmovdqa [rsp + 8*8 + 16*7], xmm13
        vmovdqa [rsp + 8*8 + 16*8], xmm14
        vmovdqa [rsp + 8*8 + 16*9], xmm15
%endif
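        ; _unused_lanes is a stack of free lane indices packed four bits
        ; apiece: the low nibble is the next free lane, so popping is an
        ; "and 0xF" plus a 4-bit shift, and the completion path below pushes
        ; a freed lane back with the opposite shift/or.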
        mov     unused_lanes, [state + _unused_lanes]
        mov     lane, unused_lanes
        and     lane, 0xF
        shr     unused_lanes, 4
        imul    lane_data, lane, _LANE_DATA_size
        mov     dword [job + _status], STS_BEING_PROCESSED
        lea     lane_data, [state + _ldata + lane_data]
        mov     [state + _unused_lanes], unused_lanes
        mov     DWORD(len), [job + _len]

        shl     len, 4
        or      len, lane
        mov     [state + _lens + 4*lane], DWORD(len)
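        ; Each _lens entry holds (job length << 4) | lane index, so the single
        ; vector minimum taken over _lens in start_loop yields both the
        ; shortest remaining length and, in its low nibble, the owning lane.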

        mov     [lane_data + _job_in_lane], job

        ; Load digest words from result_digest
        vmovdqu xmm0, [job + _result_digest + 0*16]
        vmovdqu xmm1, [job + _result_digest + 1*16]
        vmovd   [state + _args_digest + 4*lane + 0*4*16], xmm0
        vpextrd [state + _args_digest + 4*lane + 1*4*16], xmm0, 1
        vpextrd [state + _args_digest + 4*lane + 2*4*16], xmm0, 2
        vpextrd [state + _args_digest + 4*lane + 3*4*16], xmm0, 3
        vmovd   [state + _args_digest + 4*lane + 4*4*16], xmm1
        vpextrd [state + _args_digest + 4*lane + 5*4*16], xmm1, 1
        vpextrd [state + _args_digest + 4*lane + 6*4*16], xmm1, 2
        vpextrd [state + _args_digest + 4*lane + 7*4*16], xmm1, 3
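        ; _args_digest is stored transposed for the 16-lane kernel: digest
        ; word w of lane l lives at offset 4*l + w*4*16, with the 16 lanes'
        ; copies of each word contiguous.  Hence the eight dwords are
        ; scattered individually with vmovd/vpextrd rather than stored as
        ; two 16-byte blocks.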


        mov     p, [job + _buffer]
        mov     [state + _args_data_ptr + 8*lane], p

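        ; Hashing is only started once all 16 lanes are occupied; until then
        ; the job stays queued in its lane and NULL is returned (a partially
        ; filled manager is normally drained by the matching flush routine).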
        mov     DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
        add     num_lanes_inuse, 1
        mov     [state + _num_lanes_inuse], DWORD(num_lanes_inuse)
        cmp     num_lanes_inuse, 16
        jne     return_null

start_loop:
        ; Find min length; ymm0 holds lens for the first 8 lanes, ymm1 the last 8
        vmovdqu ymm0, [state + _lens + 0*32]
        vmovdqu ymm1, [state + _lens + 1*32]

        vpminud ymm2, ymm0, ymm1        ; ymm2 has {H1,G1,F1,E1,D1,C1,B1,A1}
        vpalignr ymm3, ymm3, ymm2, 8    ; ymm3 has {x,x,H1,G1,x,x,D1,C1}
        vpminud ymm2, ymm2, ymm3        ; ymm2 has {x,x,H2,G2,x,x,D2,C2}
        vpalignr ymm3, ymm3, ymm2, 4    ; ymm3 has {x,x, x,H2,x,x, x,D2}
        vpminud ymm2, ymm2, ymm3        ; ymm2 has {x,x, x,G3,x,x, x,C3}
        vperm2i128 ymm3, ymm2, ymm2, 1  ; ymm3 has {x,x, x,C3,x,x, x,G3}
        vpminud ymm2, ymm2, ymm3        ; ymm2 has min value in low dword

        vmovd   DWORD(idx), xmm2
        mov     len2, idx
        and     idx, 0xF
        shr     len2, 4
        jz      len_is_0

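        ; Clear the lane-index nibble of the minimum, broadcast it to every
        ; dword, and subtract it from all 16 entries of _lens so each lane
        ; keeps tracking the work remaining after this pass.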
        vpand   ymm2, ymm2, [rel clear_low_nibble]
        vpshufd ymm2, ymm2, 0

        vpsubd  ymm0, ymm0, ymm2
        vpsubd  ymm1, ymm1, ymm2

        vmovdqu [state + _lens + 0*32], ymm0
        vmovdqu [state + _lens + 1*32], ymm1

        ; "state" and "args" are the same address, arg1
        ; len is arg2
        call    sha256_mb_x16_avx512

        ; state and idx are intact

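        ; A zero length means the lane picked above has finished its job:
        ; hand it back to the caller, free the lane, and copy the digest out
        ; of the transposed _args_digest area into the job's result_digest.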
len_is_0:
        ; process completed job "idx"
        imul    lane_data, idx, _LANE_DATA_size
        lea     lane_data, [state + _ldata + lane_data]

        mov     job_rax, [lane_data + _job_in_lane]
        mov     unused_lanes, [state + _unused_lanes]
        mov     qword [lane_data + _job_in_lane], 0
        mov     dword [job_rax + _status], STS_COMPLETED
        shl     unused_lanes, 4
        or      unused_lanes, idx
        mov     [state + _unused_lanes], unused_lanes

        mov     DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
        sub     num_lanes_inuse, 1
        mov     [state + _num_lanes_inuse], DWORD(num_lanes_inuse)
        vmovd   xmm0, [state + _args_digest + 4*idx + 0*4*16]
        vpinsrd xmm0, [state + _args_digest + 4*idx + 1*4*16], 1
        vpinsrd xmm0, [state + _args_digest + 4*idx + 2*4*16], 2
        vpinsrd xmm0, [state + _args_digest + 4*idx + 3*4*16], 3
        vmovd   xmm1, [state + _args_digest + 4*idx + 4*4*16]
        vpinsrd xmm1, [state + _args_digest + 4*idx + 5*4*16], 1
        vpinsrd xmm1, [state + _args_digest + 4*idx + 6*4*16], 2
        vpinsrd xmm1, [state + _args_digest + 4*idx + 7*4*16], 3

        vmovdqa [job_rax + _result_digest + 0*16], xmm0
        vmovdqa [job_rax + _result_digest + 1*16], xmm1

return:

%ifidn __OUTPUT_FORMAT__, win64
        vmovdqa xmm6,  [rsp + 8*8 + 16*0]
        vmovdqa xmm7,  [rsp + 8*8 + 16*1]
        vmovdqa xmm8,  [rsp + 8*8 + 16*2]
        vmovdqa xmm9,  [rsp + 8*8 + 16*3]
        vmovdqa xmm10, [rsp + 8*8 + 16*4]
        vmovdqa xmm11, [rsp + 8*8 + 16*5]
        vmovdqa xmm12, [rsp + 8*8 + 16*6]
        vmovdqa xmm13, [rsp + 8*8 + 16*7]
        vmovdqa xmm14, [rsp + 8*8 + 16*8]
        vmovdqa xmm15, [rsp + 8*8 + 16*9]
        mov     rsi, [rsp + 8*1]
        mov     rdi, [rsp + 8*2]
%endif
        mov     rbx, [rsp + 8*0]
        mov     rbp, [rsp + 8*3]
        mov     r12, [rsp + 8*4]
        mov     r13, [rsp + 8*5]
        mov     r14, [rsp + 8*6]
        mov     r15, [rsp + 8*7]
        add     rsp, STACK_SPACE

        ret

return_null:
        xor     job_rax, job_rax
        jmp     return

section .data align=32

align 32
clear_low_nibble:
        dq 0x00000000FFFFFFF0, 0x0000000000000000
        dq 0x00000000FFFFFFF0, 0x0000000000000000
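        ; Mask used in start_loop: dword 0 of each 128-bit lane is 0xFFFFFFF0,
        ; clearing the lane-index nibble of the minimum while keeping its
        ; length bits; the other dwords are zero since vpshufd rebroadcasts
        ; dword 0 anyway.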

%else
%ifidn __OUTPUT_FORMAT__, win64
global no_sha256_mb_mgr_submit_avx512
no_sha256_mb_mgr_submit_avx512:
%endif
%endif ; HAVE_AS_KNOWS_AVX512