;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2011-2017 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
;   * Redistributions of source code must retain the above copyright
;     notice, this list of conditions and the following disclaimer.
;   * Redistributions in binary form must reproduce the above copyright
;     notice, this list of conditions and the following disclaimer in
;     the documentation and/or other materials provided with the
;     distribution.
;   * Neither the name of Intel Corporation nor the names of its
;     contributors may be used to endorse or promote products derived
;     from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%include "sha1_job.asm"
%include "sha1_mb_mgr_datastruct.asm"
%include "reg_sizes.asm"

%ifdef HAVE_AS_KNOWS_AVX512
 %ifdef HAVE_AS_KNOWS_SHANI

extern sha1_mb_x16_avx512
extern sha1_ni_x1

[bits 64]
default rel
section .text

%ifidn __OUTPUT_FORMAT__, elf64
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; LINUX register definitions
%define arg1	rdi	; rcx
%define arg2	rsi	; rdx
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%else
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; WINDOWS register definitions
%define arg1	rcx
%define arg2	rdx
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%endif

; Common definitions; each is annotated with its state across the
; hash-kernel calls (unused, covered, unchanged)
%define state		arg1	; unchanged
%define job		arg2	; unused
%define len2		arg2	; unused

; idx must be a register not clobbered by sha1_mb_x16_avx512
%define idx		rbp	; unchanged

%define unused_lanes	rbx	; covered
%define lane_data	rbx	; covered
%define tmp2		rbx	; covered

%define num_lanes_inuse	r9	; covered

%define job_rax		rax	; covered
%define tmp		rax	; unused

; STACK_SPACE needs to be an odd multiple of 8
_XMM_SAVE_SIZE	equ 10*16
_GPR_SAVE_SIZE	equ 8*8
_ALIGN_SIZE	equ 8

_XMM_SAVE	equ 0
_GPR_SAVE	equ _XMM_SAVE + _XMM_SAVE_SIZE
STACK_SPACE	equ _GPR_SAVE + _GPR_SAVE_SIZE + _ALIGN_SIZE
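; 232 bytes total: the odd multiple of 8 restores 16-byte stack alignment
; after the caller's call pushed an 8-byte return address, so the aligned
; vmovdqa XMM saves in the prologue below are safe.
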
%define APPEND(a,b) a %+ b

; SHA1_JOB* sha1_mb_mgr_flush_avx512_ni(SHA1_MB_JOB_MGR *state)
; arg 1 : state (rdi on Linux, rcx on Windows)
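; Flush strategy: pick any lane that still holds a job, point every idle
; lane at that lane's buffer with a dummy max length so it never wins the
; min-length scan, then run the hash kernels until the shortest
; outstanding job completes and can be returned to the caller.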
mk_global sha1_mb_mgr_flush_avx512_ni, function
sha1_mb_mgr_flush_avx512_ni:
	endbranch
	sub	rsp, STACK_SPACE
	mov	[rsp + _GPR_SAVE + 8*0], rbx
	mov	[rsp + _GPR_SAVE + 8*3], rbp
	mov	[rsp + _GPR_SAVE + 8*4], r12
	mov	[rsp + _GPR_SAVE + 8*5], r13
	mov	[rsp + _GPR_SAVE + 8*6], r14
	mov	[rsp + _GPR_SAVE + 8*7], r15
%ifidn __OUTPUT_FORMAT__, win64
	mov	[rsp + _GPR_SAVE + 8*1], rsi
	mov	[rsp + _GPR_SAVE + 8*2], rdi
	vmovdqa	[rsp + _XMM_SAVE + 16*0], xmm6
	vmovdqa	[rsp + _XMM_SAVE + 16*1], xmm7
	vmovdqa	[rsp + _XMM_SAVE + 16*2], xmm8
	vmovdqa	[rsp + _XMM_SAVE + 16*3], xmm9
	vmovdqa	[rsp + _XMM_SAVE + 16*4], xmm10
	vmovdqa	[rsp + _XMM_SAVE + 16*5], xmm11
	vmovdqa	[rsp + _XMM_SAVE + 16*6], xmm12
	vmovdqa	[rsp + _XMM_SAVE + 16*7], xmm13
	vmovdqa	[rsp + _XMM_SAVE + 16*8], xmm14
	vmovdqa	[rsp + _XMM_SAVE + 16*9], xmm15
%endif

	mov	DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
	cmp	num_lanes_inuse, 0
	jz	return_null

	; find a lane with a non-null job
	xor	idx, idx
%assign I 1
%rep 15
	cmp	qword [state + _ldata + I * _LANE_DATA_size + _job_in_lane], 0
	cmovne	idx, [APPEND(lane_,I)]
%assign I (I+1)
%endrep
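	; idx defaults to lane 0; each occupied lane 1..15 overwrites it
	; via cmovne, whose source must be a register or memory, hence the
	; lane_N constant table in .data below. Any occupied lane works as
	; the donor for idle lanes.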

	; copy the chosen lane's data pointer to empty lanes
copy_lane_data:
	mov	tmp, [state + _args + _data_ptr + 8*idx]

%assign I 0
%rep 16
	cmp	qword [state + _ldata + I * _LANE_DATA_size + _job_in_lane], 0
	jne	APPEND(skip_,I)
	mov	[state + _args + _data_ptr + 8*I], tmp
	mov	dword [state + _lens + 4*I], 0xFFFFFFFF
APPEND(skip_,I):
%assign I (I+1)
%endrep
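	; Empty lanes now point at valid memory and carry len 0xFFFFFFFF,
	; so the min-length scan below never selects them and the 16-lane
	; kernel never dereferences a stale job pointer.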

	; Find min length
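	; Each _lens dword packs (blocks_remaining << 4) | lane_index, so
	; the minimum dword yields both the shortest length and its lane.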
	vmovdqu	ymm0, [state + _lens + 0*32]
	vmovdqu	ymm1, [state + _lens + 1*32]

	vpminud	ymm2, ymm0, ymm1	; ymm2 has {H1,G1,F1,E1,D1,C1,B1,A1}
	vpalignr ymm3, ymm3, ymm2, 8	; ymm3 has {x,x,H1,G1,x,x,D1,C1}
	vpminud	ymm2, ymm2, ymm3	; ymm2 has {x,x,H2,G2,x,x,D2,C2}
	vpalignr ymm3, ymm3, ymm2, 4	; ymm3 has {x,x, x,H2,x,x, x,D2}
	vpminud	ymm2, ymm2, ymm3	; ymm2 has {x,x, x,G3,x,x, x,C3}
	vperm2i128 ymm3, ymm2, ymm2, 1	; ymm3 has {x,x, x, x,x,x, x,C3}
	vpminud	ymm2, ymm2, ymm3	; ymm2 has min value in low dword

	vmovd	DWORD(idx), xmm2
	mov	len2, idx
	and	idx, 0xF
	shr	len2, 4
	jz	len_is_0

	; compare num_lanes_inuse against the SHA-NI single-buffer
	; threshold; at or below it, use the single-buffer SHA-NI routine
	cmp	dword [state + _num_lanes_inuse], SHA1_NI_SB_THRESHOLD_AVX512
	ja	mb_processing

	; lens[idx] = idx: length bits cleared, low nibble keeps the lane
	mov	[state + _lens + idx*4], DWORD(idx)
	mov	r10, idx
	or	r10, 0x4000	; r10b is idx, r10b2 is 0x40 (16 lanes * 4 bytes)
	; "state" and "args" are the same address, arg1
	; len is arg2, idx and nlane in r10
	call	sha1_ni_x1
	; state and idx are intact
	jmp	len_is_0

mb_processing:

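	; Broadcast the min length (lane-index nibble masked off) to every
	; dword and subtract it from all lanes, so _lens keeps tracking the
	; work remaining after the kernel hashes len2 blocks on all lanes.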
	vpand	ymm2, ymm2, [rel clear_low_nibble]
	vpshufd	ymm2, ymm2, 0

	vpsubd	ymm0, ymm0, ymm2
	vpsubd	ymm1, ymm1, ymm2

	vmovdqu	[state + _lens + 0*32], ymm0
	vmovdqu	[state + _lens + 1*32], ymm1

	; "state" and "args" are the same address, arg1
	; len is arg2
	call	sha1_mb_x16_avx512
	; state and idx are intact

len_is_0:
	; process completed job "idx"
	imul	lane_data, idx, _LANE_DATA_size
	lea	lane_data, [state + _ldata + lane_data]

	mov	job_rax, [lane_data + _job_in_lane]
	mov	qword [lane_data + _job_in_lane], 0
	mov	dword [job_rax + _status], STS_COMPLETED
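	; unused_lanes is a stack of 4-bit lane indices packed into one
	; register; shl 4 + or idx pushes the freed lane onto it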
	mov	unused_lanes, [state + _unused_lanes]
	shl	unused_lanes, 4
	or	unused_lanes, idx
	mov	[state + _unused_lanes], unused_lanes

	mov	DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
	sub	num_lanes_inuse, 1
	mov	[state + _num_lanes_inuse], DWORD(num_lanes_inuse)

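	; _args_digest is stored transposed (one 16-lane array per SHA-1
	; word), so one lane's digest words sit 64 bytes apart
	; (16 lanes * 4 bytes) and are gathered with vpinsrd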
	vmovd	xmm0, [state + _args_digest + 4*idx + 0*64]
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 1*64], 1
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 2*64], 2
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 3*64], 3
	mov	DWORD(tmp2), [state + _args_digest + 4*idx + 4*64]

	vmovdqa	[job_rax + _result_digest + 0*16], xmm0
	mov	[job_rax + _result_digest + 1*16], DWORD(tmp2)

return:

%ifidn __OUTPUT_FORMAT__, win64
	vmovdqa	xmm6,  [rsp + _XMM_SAVE + 16*0]
	vmovdqa	xmm7,  [rsp + _XMM_SAVE + 16*1]
	vmovdqa	xmm8,  [rsp + _XMM_SAVE + 16*2]
	vmovdqa	xmm9,  [rsp + _XMM_SAVE + 16*3]
	vmovdqa	xmm10, [rsp + _XMM_SAVE + 16*4]
	vmovdqa	xmm11, [rsp + _XMM_SAVE + 16*5]
	vmovdqa	xmm12, [rsp + _XMM_SAVE + 16*6]
	vmovdqa	xmm13, [rsp + _XMM_SAVE + 16*7]
	vmovdqa	xmm14, [rsp + _XMM_SAVE + 16*8]
	vmovdqa	xmm15, [rsp + _XMM_SAVE + 16*9]
	mov	rsi, [rsp + _GPR_SAVE + 8*1]
	mov	rdi, [rsp + _GPR_SAVE + 8*2]
%endif
	mov	rbx, [rsp + _GPR_SAVE + 8*0]
	mov	rbp, [rsp + _GPR_SAVE + 8*3]
	mov	r12, [rsp + _GPR_SAVE + 8*4]
	mov	r13, [rsp + _GPR_SAVE + 8*5]
	mov	r14, [rsp + _GPR_SAVE + 8*6]
	mov	r15, [rsp + _GPR_SAVE + 8*7]
	add	rsp, STACK_SPACE

	ret

return_null:
	xor	job_rax, job_rax
	jmp	return

section .data align=16

align 16
clear_low_nibble:
	dq 0x00000000FFFFFFF0, 0x0000000000000000
	dq 0x00000000FFFFFFF0, 0x0000000000000000
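; Mask for the vpand above: the low dword of each 128-bit half is
; 0xFFFFFFF0, clearing the 4-bit lane index from the packed min value.
; The lane_N constants below are the memory operands loaded by the
; cmovne lane scan.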
lane_1:		dq 1
lane_2:		dq 2
lane_3:		dq 3
lane_4:		dq 4
lane_5:		dq 5
lane_6:		dq 6
lane_7:		dq 7
lane_8:		dq 8
lane_9:		dq 9
lane_10:	dq 10
lane_11:	dq 11
lane_12:	dq 12
lane_13:	dq 13
lane_14:	dq 14
lane_15:	dq 15

 %else
  %ifidn __OUTPUT_FORMAT__, win64
   global no_sha1_mb_mgr_flush_avx512_ni
   no_sha1_mb_mgr_flush_avx512_ni:
  %endif
 %endif ; HAVE_AS_KNOWS_SHANI
%else
 %ifidn __OUTPUT_FORMAT__, win64
  global no_sha1_mb_mgr_flush_avx512_ni
  no_sha1_mb_mgr_flush_avx512_ni:
 %endif
%endif ; HAVE_AS_KNOWS_AVX512