;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2011-2016 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
;   * Redistributions of source code must retain the above copyright
;     notice, this list of conditions and the following disclaimer.
;   * Redistributions in binary form must reproduce the above copyright
;     notice, this list of conditions and the following disclaimer in
;     the documentation and/or other materials provided with the
;     distribution.
;   * Neither the name of Intel Corporation nor the names of its
;     contributors may be used to endorse or promote products derived
;     from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%include "md5_job.asm"
%include "md5_mb_mgr_datastruct.asm"
%include "reg_sizes.asm"

%ifdef HAVE_AS_KNOWS_AVX512
extern md5_mb_x16x2_avx512
default rel

%if 1
%ifidn __OUTPUT_FORMAT__, elf64
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; UN*X register definitions
%define arg1    rdi ; rcx
%define arg2    rsi ; rdx

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%else

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; WINDOWS register definitions
%define arg1    rcx
%define arg2    rdx

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%endif

; Common register definitions

%define state   arg1
%define len2    arg2

; idx must be a register not clobbered by md5_mb_x16x2_avx512
%define idx             rbp

%define unused_lanes    ymm7
%define lane            r9

%define lane_data       r10

%define job_rax         rax
%define tmp             rax

%define num_lanes_inuse r8

%endif  ;; if 1

; STACK_SPACE needs to be an odd multiple of 8
_XMM_SAVE_SIZE  equ 10*16
_GPR_SAVE_SIZE  equ 8*8
_ALIGN_SIZE     equ 8

_XMM_SAVE       equ 0
_GPR_SAVE       equ _XMM_SAVE + _XMM_SAVE_SIZE
STACK_SPACE     equ _GPR_SAVE + _GPR_SAVE_SIZE + _ALIGN_SIZE
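; Sketch of the arithmetic behind the "odd multiple of 8" rule: 10*16 + 8*8 + 8
; = 232 = 29*8 bytes. Since rsp is 8 mod 16 at function entry, subtracting an
; odd multiple of 8 leaves rsp 16-byte aligned, so the aligned vmovdqa saves of
; xmm6-xmm15 below are safe.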

%define APPEND(a,b) a %+ b

;; Byte shift in MEM addr, reads an extra byte at [addr+32]
%macro MEM_VPSRLDDQ 2
%define %%addr          %1
%define %%TMP_YMM       %2
        vmovdqu %%TMP_YMM, [%%addr + 1]
        vmovdqu [%%addr], %%TMP_YMM
        mov     [%%addr + 31], byte 0
%endmacro

;; Byte shift in MEM addr, reads an extra byte at [addr-1]
%macro MEM_VPSLLDDQ 2
%define %%addr          %1
%define %%TMP_YMM       %2
        vmovdqu %%TMP_YMM, [%%addr-1]
        vmovdqu [%%addr], %%TMP_YMM
        mov     [%%addr], byte 0
%endmacro

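; Both macros shift a 32-byte region in memory by one byte position using a
; temporary ymm register: MEM_VPSRLDDQ drops the byte at [addr] and zeroes the
; new top byte, while MEM_VPSLLDDQ opens a zero byte at [addr]. In this file,
; MEM_VPSLLDDQ is used to make room at the front of the _unused_lanes byte
; array when a finished lane index is pushed back (see the completion path).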
align 64
default rel
section .text

; MD5_JOB* md5_mb_mgr_flush_avx512(MD5_MB_JOB_MGR *state)
; arg 1 : rcx : state
global md5_mb_mgr_flush_avx512:function
md5_mb_mgr_flush_avx512:
        sub     rsp, STACK_SPACE
        mov     [rsp + _GPR_SAVE + 8*0], rbx
        mov     [rsp + _GPR_SAVE + 8*3], rbp
        mov     [rsp + _GPR_SAVE + 8*4], r12
        mov     [rsp + _GPR_SAVE + 8*5], r13
        mov     [rsp + _GPR_SAVE + 8*6], r14
        mov     [rsp + _GPR_SAVE + 8*7], r15
%ifidn __OUTPUT_FORMAT__, win64
        mov     [rsp + _GPR_SAVE + 8*1], rsi
        mov     [rsp + _GPR_SAVE + 8*2], rdi
        vmovdqa [rsp + _XMM_SAVE + 16*0], xmm6
        vmovdqa [rsp + _XMM_SAVE + 16*1], xmm7
        vmovdqa [rsp + _XMM_SAVE + 16*2], xmm8
        vmovdqa [rsp + _XMM_SAVE + 16*3], xmm9
        vmovdqa [rsp + _XMM_SAVE + 16*4], xmm10
        vmovdqa [rsp + _XMM_SAVE + 16*5], xmm11
        vmovdqa [rsp + _XMM_SAVE + 16*6], xmm12
        vmovdqa [rsp + _XMM_SAVE + 16*7], xmm13
        vmovdqa [rsp + _XMM_SAVE + 16*8], xmm14
        vmovdqa [rsp + _XMM_SAVE + 16*9], xmm15
%endif

        mov     DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
        cmp     num_lanes_inuse, 0
        jz      return_null
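        ; (num_lanes_inuse == 0: nothing is in flight, so return NULL)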

        ; find a lane with a non-null job
        xor     idx, idx
%assign I 1
%rep 31
        cmp     qword [state + _ldata + I * _LANE_DATA_size + _job_in_lane], 0
        cmovne  idx, [APPEND(lane_,I)]
%assign I (I+1)
%endrep
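        ; idx defaults to lane 0; the unrolled scan above loads the lane_N
        ; constant from .data for any of lanes 1..31 that still holds a job,
        ; so idx ends up selecting some occupied lane.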

        ; copy idx to empty lanes
copy_lane_data:
        mov     tmp, [state + _args + _data_ptr + 8*idx]

%assign I 0
%rep 32
        cmp     qword [state + _ldata + I * _LANE_DATA_size + _job_in_lane], 0
        jne     APPEND(skip_,I)
        mov     [state + _args + _data_ptr + 8*I], tmp
        mov     dword [state + _lens + 4*I], 0xFFFFFFFF
APPEND(skip_,I):
%assign I (I+1)
%endrep
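        ; empty lanes now point at the valid lane's data and carry a length of
        ; 0xFFFFFFFF, so the x16x2 core hashes harmless data for them and the
        ; minimum search below never selects them.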

        ; Find min length
        vmovdqu ymm0, [state + _lens + 0*32]
        vmovdqu ymm1, [state + _lens + 1*32]

        vpminud ymm2, ymm0, ymm1        ; ymm2 has {D,C,B,A}
        vpalignr ymm3, ymm3, ymm2, 8    ; ymm3 has {x,x,D,C}
        vpminud ymm2, ymm2, ymm3        ; ymm2 has {x,x,E,F}
        vpalignr ymm3, ymm3, ymm2, 4    ; ymm3 has {x,x,x,E}
        vpminud ymm2, ymm2, ymm3        ; ymm2 has min value in low dword
        vperm2i128 ymm3, ymm2, ymm2, 1  ; ymm3 has halves of ymm2 reversed
        vpminud ymm2, ymm2, ymm3        ; ymm2 has min value in low dword
        ; Find min length
        vmovdqu ymm5, [state + _lens + 2*32]
        vmovdqu ymm6, [state + _lens + 3*32]

        vpminud ymm4, ymm5, ymm6        ; ymm4 has {D,C,B,A}
        vpalignr ymm3, ymm3, ymm4, 8    ; ymm3 has {x,x,D,C}
        vpminud ymm4, ymm4, ymm3        ; ymm4 has {x,x,E,F}
        vpalignr ymm3, ymm3, ymm4, 4    ; ymm3 has {x,x,x,E}
        vpminud ymm4, ymm4, ymm3        ; ymm4 has min value in low dword
        vperm2i128 ymm3, ymm4, ymm4, 1  ; ymm3 has halves of ymm4 reversed
        vpminud ymm4, ymm4, ymm3        ; ymm4 has min value in low dword

        vpminud ymm2, ymm2, ymm4        ; ymm2 has min value in low dword
        vmovd   DWORD(idx), xmm2
        mov     len2, idx
        and     idx, 0x3F
        shr     len2, 6
        jz      len_is_0
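        ; Each lens[] dword is encoded as (remaining blocks << 6) | lane index
        ; (as set up by the submit path), so the unsigned minimum above yields
        ; both values at once: idx is the owning lane and len2 the smallest
        ; remaining block count. Zero blocks means that job is already done.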

        vpand   ymm2, ymm2, [rel clear_low_6bits]
        vpshufd ymm2, ymm2, 0

        vpsubd  ymm0, ymm0, ymm2
        vpsubd  ymm1, ymm1, ymm2
        vpsubd  ymm5, ymm5, ymm2
        vpsubd  ymm6, ymm6, ymm2

        vmovdqu [state + _lens + 0*32], ymm0
        vmovdqu [state + _lens + 1*32], ymm1
        vmovdqu [state + _lens + 2*32], ymm5
        vmovdqu [state + _lens + 3*32], ymm6
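        ; The minimum block count (lane-index bits masked off) has been
        ; broadcast and subtracted from every lens[] entry, so the shortest
        ; job(s) show zero remaining blocks once the core returns, while the
        ; 0xFFFFFFFF sentinels of empty lanes stay far from zero.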

        ; "state" and "args" are the same address, arg1
        ; len is arg2
        call    md5_mb_x16x2_avx512
        ; state and idx are intact

len_is_0:
        ; process completed job "idx"
        imul    lane_data, idx, _LANE_DATA_size
        lea     lane_data, [state + _ldata + lane_data]

        mov     job_rax, [lane_data + _job_in_lane]
        mov     lane, [state + _unused_lanes]
        mov     qword [lane_data + _job_in_lane], 0
        mov     dword [job_rax + _status], STS_COMPLETED

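        ; Push the freed lane index onto the front of the _unused_lanes byte
        ; array: shift the old front qword up one byte with idx in its low
        ; byte, open a byte in the in-memory array with MEM_VPSLLDDQ, then
        ; store the updated qword back.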
        shl     lane, 8
        or      lane, idx
        MEM_VPSLLDDQ (state + _unused_lanes), unused_lanes
        mov     [state + _unused_lanes], lane

        mov     DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
        sub     num_lanes_inuse, 1
        mov     [state + _num_lanes_inuse], DWORD(num_lanes_inuse)

        mov     dword [state + _lens + 4*idx], 0xFFFFFFFF

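        ; Gather the four digest words of lane "idx" from the transposed
        ; _args_digest layout (word j of lane i sits at byte offset
        ; j*4*16*2 + i*4, i.e. a 128-byte stride between words) into xmm0
        ; and write them to the job's result_digest.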
        vmovd   xmm0, [state + _args_digest + 4*idx + 0*4*16*2]
        vpinsrd xmm0, [state + _args_digest + 4*idx + 1*4*16*2], 1
        vpinsrd xmm0, [state + _args_digest + 4*idx + 2*4*16*2], 2
        vpinsrd xmm0, [state + _args_digest + 4*idx + 3*4*16*2], 3

        vmovdqa [job_rax + _result_digest + 0*16], xmm0

return:

%ifidn __OUTPUT_FORMAT__, win64
        vmovdqa xmm6,  [rsp + _XMM_SAVE + 16*0]
        vmovdqa xmm7,  [rsp + _XMM_SAVE + 16*1]
        vmovdqa xmm8,  [rsp + _XMM_SAVE + 16*2]
        vmovdqa xmm9,  [rsp + _XMM_SAVE + 16*3]
        vmovdqa xmm10, [rsp + _XMM_SAVE + 16*4]
        vmovdqa xmm11, [rsp + _XMM_SAVE + 16*5]
        vmovdqa xmm12, [rsp + _XMM_SAVE + 16*6]
        vmovdqa xmm13, [rsp + _XMM_SAVE + 16*7]
        vmovdqa xmm14, [rsp + _XMM_SAVE + 16*8]
        vmovdqa xmm15, [rsp + _XMM_SAVE + 16*9]
        mov     rsi, [rsp + _GPR_SAVE + 8*1]
        mov     rdi, [rsp + _GPR_SAVE + 8*2]
%endif
        mov     rbx, [rsp + _GPR_SAVE + 8*0]
        mov     rbp, [rsp + _GPR_SAVE + 8*3]
        mov     r12, [rsp + _GPR_SAVE + 8*4]
        mov     r13, [rsp + _GPR_SAVE + 8*5]
        mov     r14, [rsp + _GPR_SAVE + 8*6]
        mov     r15, [rsp + _GPR_SAVE + 8*7]
        add     rsp, STACK_SPACE

        ret

return_null:
        xor     job_rax, job_rax
        jmp     return


section .data align=16

align 16
clear_low_6bits:
        dq 0x00000000FFFFFFC0, 0x0000000000000000
        dq 0x00000000FFFFFFC0, 0x0000000000000000
lane_1:  dq 1
lane_2:  dq 2
lane_3:  dq 3
lane_4:  dq 4
lane_5:  dq 5
lane_6:  dq 6
lane_7:  dq 7
lane_8:  dq 8
lane_9:  dq 9
lane_10: dq 10
lane_11: dq 11
lane_12: dq 12
lane_13: dq 13
lane_14: dq 14
lane_15: dq 15
lane_16: dq 16
lane_17: dq 17
lane_18: dq 18
lane_19: dq 19
lane_20: dq 20
lane_21: dq 21
lane_22: dq 22
lane_23: dq 23
lane_24: dq 24
lane_25: dq 25
lane_26: dq 26
lane_27: dq 27
lane_28: dq 28
lane_29: dq 29
lane_30: dq 30
lane_31: dq 31

%else
%ifidn __OUTPUT_FORMAT__, win64
global no_md5_mb_mgr_flush_avx512
no_md5_mb_mgr_flush_avx512:
%endif
%endif ; HAVE_AS_KNOWS_AVX512