;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2016 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%include "sha256_job.asm"
%include "sha256_mb_mgr_datastruct.asm"
%include "reg_sizes.asm"

%ifdef HAVE_AS_KNOWS_AVX512

extern sha256_mb_x16_avx512
extern sha256_opt_x1

[bits 64]
default rel
section .text

%ifidn __OUTPUT_FORMAT__, elf64
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; LINUX register definitions
%define arg1    rdi ; rcx
%define arg2    rsi ; rdx

%define tmp4    rdx
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%else

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; WINDOWS register definitions
%define arg1    rcx
%define arg2    rdx

%define tmp4    rsi
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%endif

; Common register definitions

%define state   arg1
%define job     arg2
%define len2    arg2

; idx must be a register not clobbered by sha256_mb_x16_avx512 and sha256_opt_x1
%define idx     rbp

%define num_lanes_inuse r9
%define unused_lanes    rbx
%define lane_data       rbx
%define tmp2            rbx

%define job_rax         rax
%define tmp1            rax
%define size_offset     rax
%define tmp             rax
%define start_offset    rax

%define tmp3            arg1

%define extra_blocks    arg2
%define p               arg2

; STACK_SPACE needs to be an odd multiple of 8
_XMM_SAVE_SIZE  equ 10*16
_GPR_SAVE_SIZE  equ 8*8
_ALIGN_SIZE     equ 8

_XMM_SAVE       equ 0
_GPR_SAVE       equ _XMM_SAVE + _XMM_SAVE_SIZE
STACK_SPACE     equ _GPR_SAVE + _GPR_SAVE_SIZE + _ALIGN_SIZE
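;
; With these sizes STACK_SPACE = 160 + 64 + 8 = 232, an odd multiple of 8:
; the call that entered this function pushed an 8-byte return address, so
; rsp % 16 == 8 on entry and the sub below leaves rsp 16-byte aligned for
; the aligned vmovdqa XMM saves.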

%define APPEND(a,b) a %+ b

; SHA256_JOB* sha256_mb_mgr_flush_avx512(SHA256_MB_JOB_MGR *state)
; arg 1 : rcx (win64) / rdi (elf64) : state
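;
; Flush is the drain path: each call completes the shortest outstanding job
; (temporarily pointing idle lanes at live data) and returns its SHA256_JOB*,
; or NULL once the manager is empty. Typical caller loop, as a sketch: the
; mgr/job types come from sha256_mb.h, and consume() is hypothetical:
;
;     SHA256_JOB *job;
;     while ((job = sha256_mb_mgr_flush_avx512(&state)) != NULL)
;         consume(job->result_digest);
;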
mk_global sha256_mb_mgr_flush_avx512, function
sha256_mb_mgr_flush_avx512:
        endbranch
        sub     rsp, STACK_SPACE
        mov     [rsp + _GPR_SAVE + 8*0], rbx
        mov     [rsp + _GPR_SAVE + 8*3], rbp
        mov     [rsp + _GPR_SAVE + 8*4], r12
        mov     [rsp + _GPR_SAVE + 8*5], r13
        mov     [rsp + _GPR_SAVE + 8*6], r14
        mov     [rsp + _GPR_SAVE + 8*7], r15
%ifidn __OUTPUT_FORMAT__, win64
        mov     [rsp + _GPR_SAVE + 8*1], rsi
        mov     [rsp + _GPR_SAVE + 8*2], rdi
        vmovdqa [rsp + _XMM_SAVE + 16*0], xmm6
        vmovdqa [rsp + _XMM_SAVE + 16*1], xmm7
        vmovdqa [rsp + _XMM_SAVE + 16*2], xmm8
        vmovdqa [rsp + _XMM_SAVE + 16*3], xmm9
        vmovdqa [rsp + _XMM_SAVE + 16*4], xmm10
        vmovdqa [rsp + _XMM_SAVE + 16*5], xmm11
        vmovdqa [rsp + _XMM_SAVE + 16*6], xmm12
        vmovdqa [rsp + _XMM_SAVE + 16*7], xmm13
        vmovdqa [rsp + _XMM_SAVE + 16*8], xmm14
        vmovdqa [rsp + _XMM_SAVE + 16*9], xmm15
%endif

        mov     DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
        cmp     num_lanes_inuse, 0
        jz      return_null

        ; find a lane with a non-null job
        xor     idx, idx
%assign I 1
%rep 15
        cmp     qword [state + _ldata + I * _LANE_DATA_size + _job_in_lane], 0
        cmovne  idx, [APPEND(lane_,I)]
%assign I (I+1)
%endrep
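;
; The %rep expands to 15 unrolled compare/cmov pairs; cmovne takes only a
; register or memory source (no immediates), hence the lane_1..lane_15
; qword constants in .data below. Scalar C equivalent (illustrative):
;
;     uint64_t idx = 0;
;     for (int i = 1; i < 16; i++)
;         if (state->ldata[i].job_in_lane != NULL)
;             idx = i;            /* any lane with a live job will do */
;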

        ; copy idx to empty lanes
copy_lane_data:
        mov     tmp, [state + _args + _data_ptr + 8*idx]

%assign I 0
%rep 16
        cmp     qword [state + _ldata + I * _LANE_DATA_size + _job_in_lane], 0
        jne     APPEND(skip_,I)
        mov     [state + _args + _data_ptr + 8*I], tmp
        mov     dword [state + _lens + 4*I], 0xFFFFFFFF
APPEND(skip_,I):
%assign I (I+1)
%endrep
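;
; The x16 kernel processes all 16 lanes unconditionally, so every empty lane
; is pointed at the live lane's buffer (harmless, bounded reads) and given
; length 0xFFFFFFFF so it can never win the min-length search below.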

        ; Find min length
        vmovdqu ymm0, [state + _lens + 0*32]
        vmovdqu ymm1, [state + _lens + 1*32]

        vpminud ymm2, ymm0, ymm1        ; ymm2 has {H1,G1,F1,E1,D1,C1,B1,A1}
        vpalignr ymm3, ymm3, ymm2, 8    ; ymm3 has {x,x,H1,G1,x,x,D1,C1}
        vpminud ymm2, ymm2, ymm3        ; ymm2 has {x,x,H2,G2,x,x,D2,C2}
        vpalignr ymm3, ymm3, ymm2, 4    ; ymm3 has {x,x, x,H2,x,x, x,D2}
        vpminud ymm2, ymm2, ymm3        ; ymm2 has {x,x, x,G3,x,x, x,C3}
        vperm2i128 ymm3, ymm2, ymm2, 1  ; ymm3 has {x,x, x, x,x,x, x,C3}
        vpminud ymm2, ymm2, ymm3        ; ymm2 has min value in low dword

        vmovd   DWORD(idx), xmm2
        mov     len2, idx
        and     idx, 0xF
        shr     len2, 4
        jz      len_is_0
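;
; Each 32-bit lens entry packs (num_blocks << 4) | lane_index, so a single
; unsigned-min reduction finds the shortest job and the lane that owns it at
; once. Scalar C equivalent of the reduction (illustrative):
;
;     uint32_t min = UINT32_MAX;
;     for (int i = 0; i < 16; i++)
;         if (lens[i] < min) min = lens[i];
;     unsigned idx  = min & 0xF;    /* lane of the shortest job */
;     unsigned len2 = min >> 4;     /* its remaining blocks     */
;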

        ; compare num_lanes_inuse with the single-buffer threshold: with only
        ; a few busy lanes the 16-wide kernel wastes most of its work on
        ; padded lanes, so use the single-buffer function instead
        cmp     dword [state + _num_lanes_inuse], SHA256_SB_THRESHOLD_AVX512
        ja      mb_processing

        ; lens[idx] -= (len2 << 4), i.e. store idx: zero blocks remain and
        ; the lane index stays in the low nibble
        mov     [state + _lens + idx*4], DWORD(idx)
        mov     r10, idx
        or      r10, 0x4000             ; 16 lanes * 4 = 64 in the second byte; low byte of r10 is idx
        ; "state" and "args" are the same address, arg1
        ; len is arg2, idx and nlane in r10
        call    sha256_opt_x1
        ; state and idx are intact
        jmp     len_is_0
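;
; The dispatch in C terms (SHA256_SB_THRESHOLD_AVX512 comes from the
; included headers; the sketch ignores the r10 lane encoding set up above):
;
;     if (state->num_lanes_inuse <= SHA256_SB_THRESHOLD_AVX512)
;         sha256_opt_x1(state, len2);        /* one lane, serial   */
;     else
;         sha256_mb_x16_avx512(state, len2); /* all lanes, 16-wide */
;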

mb_processing:

        vpand   ymm2, ymm2, [rel clear_low_nibble]
        vpshufd ymm2, ymm2, 0

        vpsubd  ymm0, ymm0, ymm2
        vpsubd  ymm1, ymm1, ymm2

        vmovdqu [state + _lens + 0*32], ymm0
        vmovdqu [state + _lens + 1*32], ymm1

        ; "state" and "args" are the same address, arg1
        ; len is arg2
        call    sha256_mb_x16_avx512
        ; state and idx are intact

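;
; Above, the lane nibble is masked off the packed min (clear_low_nibble),
; the result is broadcast, and it is subtracted from all 16 lens entries, so
; the shortest lane reaches a block count of 0 exactly when the x16 kernel
; has hashed len2 blocks per lane. Scalar equivalent (illustrative):
;
;     uint32_t step = min & ~0xFu;   /* len2 << 4, lane bits cleared */
;     for (int i = 0; i < 16; i++)
;         lens[i] -= step;           /* low-nibble lane id preserved */
;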
len_is_0:
        ; process completed job "idx"
        imul    lane_data, idx, _LANE_DATA_size
        lea     lane_data, [state + _ldata + lane_data]

        mov     job_rax, [lane_data + _job_in_lane]
        mov     qword [lane_data + _job_in_lane], 0
        mov     dword [job_rax + _status], STS_COMPLETED
        mov     unused_lanes, [state + _unused_lanes]
        shl     unused_lanes, 4
        or      unused_lanes, idx
        mov     [state + _unused_lanes], unused_lanes

        mov     DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
        sub     num_lanes_inuse, 1
        mov     [state + _num_lanes_inuse], DWORD(num_lanes_inuse)

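;
; unused_lanes is a LIFO of 4-bit lane ids packed into a single qword:
; shift-and-or pushes the freed lane, and the submit path pops from the low
; nibble. In C terms (illustrative):
;
;     state->unused_lanes = (state->unused_lanes << 4) | idx;      /* push */
;     /* submit side: idx = unused_lanes & 0xF; unused_lanes >>= 4;   pop  */
;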
        vmovd   xmm0, [state + _args_digest + 4*idx + 0*4*16]
        vpinsrd xmm0, [state + _args_digest + 4*idx + 1*4*16], 1
        vpinsrd xmm0, [state + _args_digest + 4*idx + 2*4*16], 2
        vpinsrd xmm0, [state + _args_digest + 4*idx + 3*4*16], 3
        vmovd   xmm1, [state + _args_digest + 4*idx + 4*4*16]
        vpinsrd xmm1, [state + _args_digest + 4*idx + 5*4*16], 1
        vpinsrd xmm1, [state + _args_digest + 4*idx + 6*4*16], 2
        vpinsrd xmm1, [state + _args_digest + 4*idx + 7*4*16], 3

        vmovdqa [job_rax + _result_digest + 0*16], xmm0
        vmovdqa [job_rax + _result_digest + 1*16], xmm1

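;
; _args_digest is stored transposed for the 16-wide kernel: word w of lane i
; lives at byte offset 4*i + w*4*16, so lane idx's eight digest words sit 64
; bytes apart and are gathered one dword at a time. Layout in C terms
; (illustrative):
;
;     uint32_t args_digest[8][16];               /* [word][lane] */
;     for (int w = 0; w < 8; w++)
;         job->result_digest[w] = args_digest[w][idx];
;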
return:
%ifidn __OUTPUT_FORMAT__, win64
        vmovdqa xmm6,  [rsp + _XMM_SAVE + 16*0]
        vmovdqa xmm7,  [rsp + _XMM_SAVE + 16*1]
        vmovdqa xmm8,  [rsp + _XMM_SAVE + 16*2]
        vmovdqa xmm9,  [rsp + _XMM_SAVE + 16*3]
        vmovdqa xmm10, [rsp + _XMM_SAVE + 16*4]
        vmovdqa xmm11, [rsp + _XMM_SAVE + 16*5]
        vmovdqa xmm12, [rsp + _XMM_SAVE + 16*6]
        vmovdqa xmm13, [rsp + _XMM_SAVE + 16*7]
        vmovdqa xmm14, [rsp + _XMM_SAVE + 16*8]
        vmovdqa xmm15, [rsp + _XMM_SAVE + 16*9]
        mov     rsi, [rsp + _GPR_SAVE + 8*1]
        mov     rdi, [rsp + _GPR_SAVE + 8*2]
%endif
        mov     rbx, [rsp + _GPR_SAVE + 8*0]
        mov     rbp, [rsp + _GPR_SAVE + 8*3]
        mov     r12, [rsp + _GPR_SAVE + 8*4]
        mov     r13, [rsp + _GPR_SAVE + 8*5]
        mov     r14, [rsp + _GPR_SAVE + 8*6]
        mov     r15, [rsp + _GPR_SAVE + 8*7]
        add     rsp, STACK_SPACE

        ret

return_null:
        xor     job_rax, job_rax
        jmp     return

section .data align=16

align 16
clear_low_nibble:
        dq 0x00000000FFFFFFF0, 0x0000000000000000
        dq 0x00000000FFFFFFF0, 0x0000000000000000
lane_1:  dq 1
lane_2:  dq 2
lane_3:  dq 3
lane_4:  dq 4
lane_5:  dq 5
lane_6:  dq 6
lane_7:  dq 7
lane_8:  dq 8
lane_9:  dq 9
lane_10: dq 10
lane_11: dq 11
lane_12: dq 12
lane_13: dq 13
lane_14: dq 14
lane_15: dq 15

%else
%ifidn __OUTPUT_FORMAT__, win64
global no_sha256_mb_mgr_flush_avx512
no_sha256_mb_mgr_flush_avx512:
%endif
%endif ; HAVE_AS_KNOWS_AVX512