;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2020 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%include "sm3_job.asm"
%include "sm3_mb_mgr_datastruct.asm"
%include "reg_sizes.asm"


%ifdef HAVE_AS_KNOWS_AVX512

extern sm3_mb_x16_avx512
;extern sm3_opt_x1

[bits 64]
default rel
section .text

%ifidn __OUTPUT_FORMAT__, elf64
%define arg1    rdi             ; rcx on win64
%define arg2    rsi             ; rdx on win64
%define tmp4    rdx
%else
%define arg1    rcx
%define arg2    rdx
%define tmp4    rsi
%endif
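
; On System V (elf64) the state pointer arrives in rdi and the job argument
; in rsi; on Win64 they arrive in rcx and rdx. The defines above give both
; ABIs the same arg1/arg2 names so the body below stays convention-neutral.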


; Common register definitions

%define state           arg1
%define job             arg2
%define len2            arg2

%define idx             rbp

%define num_lanes_inuse r9
%define unused_lanes    rbx
%define lane_data       rbx
%define tmp2            rbx

%define job_rax         rax
%define tmp1            rax
%define size_offset     rax
%define tmp             rax
%define start_offset    rax

%define tmp3            arg1

%define extra_blocks    arg2
%define p               arg2


; STACK_SPACE needs to be an odd multiple of 8
_XMM_SAVE_SIZE  equ 10*16
_GPR_SAVE_SIZE  equ 8*8
_ALIGN_SIZE     equ 8

_XMM_SAVE       equ 0
_GPR_SAVE       equ _XMM_SAVE + _XMM_SAVE_SIZE
STACK_SPACE     equ _GPR_SAVE + _GPR_SAVE_SIZE + _ALIGN_SIZE
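
; Resulting frame (232 bytes, an odd multiple of 8 so rsp stays 16-byte
; aligned after the call-pushed return address):
;
;   rsp + 0   .. rsp + 159 : _XMM_SAVE - xmm6-xmm15 (Win64 callee-saved)
;   rsp + 160 .. rsp + 223 : _GPR_SAVE - rbx, rbp, r12-r15 (+ rsi, rdi on Win64)
;   rsp + 224 .. rsp + 231 : alignment padding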

%define APPEND(a,b) a %+ b


; SM3_JOB* sm3_mb_mgr_flush_avx512(SM3_MB_JOB_MGR *state)
; arg 1 : state (rdi on elf64, rcx on win64)
mk_global sm3_mb_mgr_flush_avx512, function
sm3_mb_mgr_flush_avx512:
        endbranch

        ; allocate the stack frame and save callee-saved registers
        sub     rsp, STACK_SPACE
        mov     [rsp + _GPR_SAVE + 8*0], rbx
        mov     [rsp + _GPR_SAVE + 8*3], rbp
        mov     [rsp + _GPR_SAVE + 8*4], r12
        mov     [rsp + _GPR_SAVE + 8*5], r13
        mov     [rsp + _GPR_SAVE + 8*6], r14
        mov     [rsp + _GPR_SAVE + 8*7], r15
%ifidn __OUTPUT_FORMAT__, win64
        mov     [rsp + _GPR_SAVE + 8*1], rsi
        mov     [rsp + _GPR_SAVE + 8*2], rdi
        vmovdqa [rsp + _XMM_SAVE + 16*0], xmm6
        vmovdqa [rsp + _XMM_SAVE + 16*1], xmm7
        vmovdqa [rsp + _XMM_SAVE + 16*2], xmm8
        vmovdqa [rsp + _XMM_SAVE + 16*3], xmm9
        vmovdqa [rsp + _XMM_SAVE + 16*4], xmm10
        vmovdqa [rsp + _XMM_SAVE + 16*5], xmm11
        vmovdqa [rsp + _XMM_SAVE + 16*6], xmm12
        vmovdqa [rsp + _XMM_SAVE + 16*7], xmm13
        vmovdqa [rsp + _XMM_SAVE + 16*8], xmm14
        vmovdqa [rsp + _XMM_SAVE + 16*9], xmm15
%endif

        mov     DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
        cmp     num_lanes_inuse, 0
        jz      return_null

        ; find a lane with a non-null job
        xor     idx, idx
%assign I 1
%rep 15
        cmp     qword [state + _ldata + I * _LANE_DATA_size + _job_in_lane], 0
        cmovne  idx, [APPEND(lane_,I)]
%assign I (I+1)
%endrep
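
; The %rep above unrolls to 15 compare/cmovne pairs: lane 0 is the default,
; and for each lane 1..15 holding a non-null job, idx is loaded from the
; matching lane_N constant in .data. Memory constants are used because
; cmovne cannot take an immediate source operand.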


        ; copy idx to empty lanes
copy_lane_data:
        mov     tmp, [state + _args + _data_ptr + 8*idx]

%assign I 0
%rep 16
        cmp     qword [state + _ldata + I * _LANE_DATA_size + _job_in_lane], 0
        jne     APPEND(skip_,I)
        mov     [state + _args + _data_ptr + 8*I], tmp
        mov     dword [state + _lens + 4*I], 0xFFFFFFFF
APPEND(skip_,I):
%assign I (I+1)
%endrep
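
; Empty lanes are pointed at the chosen lane's data and given a length of
; 0xFFFFFFFF: the 16-wide kernel reads every lane unconditionally, so each
; lane needs a valid pointer, and the max length keeps idle lanes from ever
; being selected as the minimum below.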

        ; Find min length
        vmovdqu ymm0, [state + _lens + 0*32]
        vmovdqu ymm1, [state + _lens + 1*32]

        vpminud ymm2, ymm0, ymm1        ; ymm2 has {H1,G1,F1,E1,D1,C1,B1,A1}
        vpalignr ymm3, ymm3, ymm2, 8    ; ymm3 has {x,x,H1,G1,x,x,D1,C1}
        vpminud ymm2, ymm2, ymm3        ; ymm2 has {x,x,H2,G2,x,x,D2,C2}
        vpalignr ymm3, ymm3, ymm2, 4    ; ymm3 has {x,x, x,H2,x,x, x,D2}
        vpminud ymm2, ymm2, ymm3        ; ymm2 has {x,x, x,G3,x,x, x,C3}
        vperm2i128 ymm3, ymm2, ymm2, 1  ; ymm3 has {x,x, x, x,x,x, x,C3}
        vpminud ymm2, ymm2, ymm3        ; ymm2 has min value in low dword

        vmovd   DWORD(idx), xmm2
        mov     len2, idx
        and     idx, 0xF
        shr     len2, 4
        jz      len_is_0
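
; Each entry of _lens is encoded as (length_in_blocks << 4) | lane_index, so
; the unsigned minimum computed above carries the index of the shortest lane
; in its low nibble:
;
;     idx  = min & 0xF     ; lane that finishes first
;     len2 = min >> 4      ; blocks left to hash in that lane
;
; When len2 is zero the shortest job is already complete and the hashing
; call is skipped.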

        ; an optimized flush could check len2 here and dispatch to a
        ; single-buffer routine (the commented-out sm3_opt_x1 above)

mb_processing:

        vpand   ymm2, ymm2, [rel clear_low_nibble]
        vpshufd ymm2, ymm2, 0
        vpsubd  ymm0, ymm0, ymm2
        vpsubd  ymm1, ymm1, ymm2

        vmovdqu [state + _lens + 0*32], ymm0
        vmovdqu [state + _lens + 1*32], ymm1
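
; clear_low_nibble masks off the lane-index bits of the minimum, vpshufd
; broadcasts it across each 128-bit lane, and the subtractions deduct the
; common block count from all 16 lens. Since the subtrahend is a multiple
; of 16, the lane-index nibble in each remaining len is preserved.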

        ; "state" and "args" are the same address, arg1
        ; len is arg2
        call    sm3_mb_x16_avx512
        ; state and idx are intact


len_is_0:
        ; process completed job "idx"
        imul    lane_data, idx, _LANE_DATA_size
        lea     lane_data, [state + _ldata + lane_data]

        mov     job_rax, [lane_data + _job_in_lane]
        mov     qword [lane_data + _job_in_lane], 0
        mov     dword [job_rax + _status], STS_COMPLETED
        mov     unused_lanes, [state + _unused_lanes]
        shl     unused_lanes, 4
        or      unused_lanes, idx
        mov     [state + _unused_lanes], unused_lanes
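
; _unused_lanes is a stack of free lane indices packed 4 bits each; shifting
; left by 4 and OR-ing in idx pushes the freed lane. With 16 lanes the 16
; nibbles fill the whole 64-bit word, which is why _num_lanes_inuse is kept
; as a separate counter.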

        mov     DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
        sub     num_lanes_inuse, 1
        mov     [state + _num_lanes_inuse], DWORD(num_lanes_inuse)

        vmovd   xmm0, [state + _args_digest + 4*idx + 0*4*16]
        vpinsrd xmm0, [state + _args_digest + 4*idx + 1*4*16], 1
        vpinsrd xmm0, [state + _args_digest + 4*idx + 2*4*16], 2
        vpinsrd xmm0, [state + _args_digest + 4*idx + 3*4*16], 3
        vmovd   xmm1, [state + _args_digest + 4*idx + 4*4*16]
        vpinsrd xmm1, [state + _args_digest + 4*idx + 5*4*16], 1
        vpinsrd xmm1, [state + _args_digest + 4*idx + 6*4*16], 2
        vpinsrd xmm1, [state + _args_digest + 4*idx + 7*4*16], 3
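
; _args_digest is stored transposed for the 16-wide kernel: word j of lane i
; lives at offset 4*i + j*4*16, so consecutive words of one lane's digest are
; 64 bytes apart. The vmovd/vpinsrd sequence gathers lane idx's eight digest
; words into xmm0/xmm1 before writing them to the job's result.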

        vmovdqa [job_rax + _result_digest + 0*16], xmm0
        vmovdqa [job_rax + _result_digest + 1*16], xmm1


        ; restore saved registers and release the stack frame
return:
%ifidn __OUTPUT_FORMAT__, win64
        vmovdqa xmm6,  [rsp + _XMM_SAVE + 16*0]
        vmovdqa xmm7,  [rsp + _XMM_SAVE + 16*1]
        vmovdqa xmm8,  [rsp + _XMM_SAVE + 16*2]
        vmovdqa xmm9,  [rsp + _XMM_SAVE + 16*3]
        vmovdqa xmm10, [rsp + _XMM_SAVE + 16*4]
        vmovdqa xmm11, [rsp + _XMM_SAVE + 16*5]
        vmovdqa xmm12, [rsp + _XMM_SAVE + 16*6]
        vmovdqa xmm13, [rsp + _XMM_SAVE + 16*7]
        vmovdqa xmm14, [rsp + _XMM_SAVE + 16*8]
        vmovdqa xmm15, [rsp + _XMM_SAVE + 16*9]
        mov     rsi, [rsp + _GPR_SAVE + 8*1]
        mov     rdi, [rsp + _GPR_SAVE + 8*2]
%endif
        mov     rbx, [rsp + _GPR_SAVE + 8*0]
        mov     rbp, [rsp + _GPR_SAVE + 8*3]
        mov     r12, [rsp + _GPR_SAVE + 8*4]
        mov     r13, [rsp + _GPR_SAVE + 8*5]
        mov     r14, [rsp + _GPR_SAVE + 8*6]
        mov     r15, [rsp + _GPR_SAVE + 8*7]
        add     rsp, STACK_SPACE

        ret


return_null:
        xor     job_rax, job_rax
        jmp     return

section .data align=16

align 16
clear_low_nibble:
        dq 0x00000000FFFFFFF0, 0x0000000000000000
        dq 0x00000000FFFFFFF0, 0x0000000000000000
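; 256-bit mask: within each 128-bit lane only bits 31:4 of dword 0 survive,
; stripping the lane-index nibble from the minimum length before it is
; broadcast and subtracted above.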
lane_1:         dq 1
lane_2:         dq 2
lane_3:         dq 3
lane_4:         dq 4
lane_5:         dq 5
lane_6:         dq 6
lane_7:         dq 7
lane_8:         dq 8
lane_9:         dq 9
lane_10:        dq 10
lane_11:        dq 11
lane_12:        dq 12
lane_13:        dq 13
lane_14:        dq 14
lane_15:        dq 15

%else
%ifidn __OUTPUT_FORMAT__, win64
global no_sm3_mb_mgr_flush_avx512
no_sm3_mb_mgr_flush_avx512:
%endif

%endif ; HAVE_AS_KNOWS_AVX512