;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2011-2016 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
;   * Redistributions of source code must retain the above copyright
;     notice, this list of conditions and the following disclaimer.
;   * Redistributions in binary form must reproduce the above copyright
;     notice, this list of conditions and the following disclaimer in
;     the documentation and/or other materials provided with the
;     distribution.
;   * Neither the name of Intel Corporation nor the names of its
;     contributors may be used to endorse or promote products derived
;     from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%include "md5_job.asm"
%include "md5_mb_mgr_datastruct.asm"
%include "reg_sizes.asm"

%ifdef HAVE_AS_KNOWS_AVX512
extern  md5_mb_x16x2_avx512
default rel

%if 1
%ifidn __OUTPUT_FORMAT__, win64
; WINDOWS register definitions
%define arg1    rcx
%define arg2    rdx

%define lane    rsi

%else
; UN*X register definitions
%define arg1    rdi
%define arg2    rsi

%define lane    rdx

%endif

; Common definitions
%define state   arg1
%define job     arg2
%define len2    arg2

; idx needs to be in a register not clobbered by md5_mb_x16x2_avx512
%define idx             rbp

%define p               r11

%define unused_lanes    ymm7

%define job_rax         rax
%define len             rax

%define num_lanes_inuse r9

%define lane_data       r10

%endif ; if 1

; STACK_SPACE needs to be an odd multiple of 8
%define STACK_SPACE     8*8 + 16*10 + 8
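
; A quick check on the figure above (a sketch of the reasoning, not part of
; the original source): 8*8 bytes for the saved GPRs (rbx, rbp, r12-r15,
; plus rsi/rdi on win64), 16*10 bytes for xmm6-xmm15, and 8 bytes of padding:
;
;     8*8 + 16*10 + 8 = 64 + 160 + 8 = 232 = 29 * 8
;
; Since 232 is an odd multiple of 8 and the caller's CALL leaves rsp 8 mod 16,
; the sub below lands rsp on a 16-byte boundary, as the vmovdqa saves require.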

;; Byte shift right in MEM addr; reads an extra byte beyond the buffer, at [addr+32]
%macro MEM_VPSRLDDQ 2
%define %%addr          %1
%define %%TMP_YMM       %2
        vmovdqu %%TMP_YMM, [%%addr + 1]
        vmovdqu [%%addr], %%TMP_YMM
        mov     [%%addr + 31], byte 0
%endmacro

;; Byte shift left in MEM addr; reads an extra byte before the buffer, at [addr-1]
%macro MEM_VPSLLDDQ 2
%define %%addr          %1
%define %%TMP_YMM       %2
        vmovdqu %%TMP_YMM, [%%addr - 1]
        vmovdqu [%%addr], %%TMP_YMM
        mov     [%%addr], byte 0
%endmacro
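
; Together these macros treat the 32-byte _unused_lanes field as a byte queue
; of free lane ids: MEM_VPSRLDDQ pops the front entry (every byte moves one
; address lower), and MEM_VPSLLDDQ makes room at the front (every byte moves
; one address higher) so a freed lane id can be stored back. A rough sketch
; of the intended effect, with one-byte entries a..z and popped id 'a':
;
;     pop:  [a,b,c,...]  ->  [b,c,...,0]     (caller extracted 'a' beforehand)
;     push: [b,c,...]    ->  [idx,b,c,...]   (combined with the qword store below)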

align 64
default rel
section .text

; JOB_MD5* md5_mb_mgr_submit_avx512(MB_MGR *state, JOB_MD5 *job)
; arg 1 : rcx (win64) / rdi (unix) : state
; arg 2 : rdx (win64) / rsi (unix) : job
global md5_mb_mgr_submit_avx512:function
md5_mb_mgr_submit_avx512:

        sub     rsp, STACK_SPACE
        ; we need to save/restore all GPRs because the lower layer clobbers them
        mov     [rsp + 8*0], rbx
        mov     [rsp + 8*1], rbp
        mov     [rsp + 8*2], r12
        mov     [rsp + 8*3], r13
        mov     [rsp + 8*4], r14
        mov     [rsp + 8*5], r15
%ifidn __OUTPUT_FORMAT__, win64
        mov     [rsp + 8*6], rsi
        mov     [rsp + 8*7], rdi
        vmovdqa [rsp + 8*8 + 16*0], xmm6
        vmovdqa [rsp + 8*8 + 16*1], xmm7
        vmovdqa [rsp + 8*8 + 16*2], xmm8
        vmovdqa [rsp + 8*8 + 16*3], xmm9
        vmovdqa [rsp + 8*8 + 16*4], xmm10
        vmovdqa [rsp + 8*8 + 16*5], xmm11
        vmovdqa [rsp + 8*8 + 16*6], xmm12
        vmovdqa [rsp + 8*8 + 16*7], xmm13
        vmovdqa [rsp + 8*8 + 16*8], xmm14
        vmovdqa [rsp + 8*8 + 16*9], xmm15
%endif

        mov     lane, [state + _unused_lanes]
        and     lane, 0x3F
        MEM_VPSRLDDQ (state + _unused_lanes), unused_lanes
        imul    lane_data, lane, _LANE_DATA_size
        mov     dword [job + _status], STS_BEING_PROCESSED
        lea     lane_data, [state + _ldata + lane_data]
        mov     DWORD(len), [job + _len]

        shl     len, 6          ; low 6 bits store idx
        or      len, lane
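
        ; The packing above encodes two things in one dword (a reference
        ; sketch in C notation; the values are illustrative only):
        ;
        ;     len_word = (len << 6) | lane;
        ;     // e.g. len 10 in lane 3 -> (10 << 6) | 3 = 0x283
        ;
        ; Keeping the lane index in the low 6 bits (enough for 32 lanes)
        ; lets the min-length search below recover both the shortest
        ; length and its lane from a single vpminud reduction.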

        mov     [lane_data + _job_in_lane], job
        mov     [state + _lens + 4*lane], DWORD(len)

        ; Load digest words from result_digest
        vmovdqu xmm0, [job + _result_digest + 0*16]
        vmovd   [state + _args_digest + 4*lane + 0*4*16*2], xmm0
        vpextrd [state + _args_digest + 4*lane + 1*4*16*2], xmm0, 1
        vpextrd [state + _args_digest + 4*lane + 2*4*16*2], xmm0, 2
        vpextrd [state + _args_digest + 4*lane + 3*4*16*2], xmm0, 3
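
        ; Note on the 4*16*2 stride: _args_digest is stored transposed,
        ; structure-of-arrays style, so the x16x2 kernel can load one digest
        ; word for all lanes with a single vector load. Each row holds one
        ; MD5 word (A, B, C or D) for all 32 = 16*2 lanes, so rows are
        ; 4*16*2 = 128 bytes apart and this lane's slot within a row is at
        ; byte offset 4*lane.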

        mov     p, [job + _buffer]
        mov     [state + _args_data_ptr + 8*lane], p

        mov     DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
        add     num_lanes_inuse, 1
        mov     [state + _num_lanes_inuse], DWORD(num_lanes_inuse)
        cmp     num_lanes_inuse, 32
        jne     return_null
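
        ; Submit therefore returns NULL until all 32 lanes are occupied, and
        ; only then runs the kernel. A hypothetical caller loop (sketch only;
        ; the flush routine and exact types come from the matching manager
        ; sources, not from this file):
        ;
        ;     JOB_MD5 *done = md5_mb_mgr_submit_avx512(state, job);
        ;     while (have_more_jobs())
        ;             done = md5_mb_mgr_submit_avx512(state, next_job());
        ;     while ((done = md5_mb_mgr_flush_avx512(state)) != NULL)
        ;             ;  /* drain the partially filled lanes */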

start_loop:
        ; Find min length in lanes 0-15
        vmovdqu ymm0, [state + _lens + 0*32]
        vmovdqu ymm1, [state + _lens + 1*32]

        vpminud ymm2, ymm0, ymm1        ; ymm2 has {D,C,B,A} in each 128-bit half
        vpalignr ymm3, ymm3, ymm2, 8    ; ymm3 has {x,x,D,C}
        vpminud ymm2, ymm2, ymm3        ; ymm2 has {x,x,E,F}
        vpalignr ymm3, ymm3, ymm2, 4    ; ymm3 has {x,x,x,E}
        vpminud ymm2, ymm2, ymm3        ; each 128-bit half has its min in its low dword
        vperm2i128 ymm3, ymm2, ymm2, 1  ; ymm3 has the halves of ymm2 swapped
        vpminud ymm2, ymm2, ymm3        ; ymm2 has min of lanes 0-15 in low dword

        ; Find min length in lanes 16-31
        vmovdqu ymm5, [state + _lens + 2*32]
        vmovdqu ymm6, [state + _lens + 3*32]

        vpminud ymm4, ymm5, ymm6        ; ymm4 has {D,C,B,A} in each 128-bit half
        vpalignr ymm3, ymm3, ymm4, 8    ; ymm3 has {x,x,D,C}
        vpminud ymm4, ymm4, ymm3        ; ymm4 has {x,x,E,F}
        vpalignr ymm3, ymm3, ymm4, 4    ; ymm3 has {x,x,x,E}
        vpminud ymm4, ymm4, ymm3        ; each 128-bit half has its min in its low dword
        vperm2i128 ymm3, ymm4, ymm4, 1  ; ymm3 has the halves of ymm4 swapped
        vpminud ymm4, ymm4, ymm3        ; ymm4 has min of lanes 16-31 in low dword

        vpminud ymm2, ymm2, ymm4        ; ymm2 has overall min value in low dword
        vmovd   DWORD(idx), xmm2
        mov     len2, idx
        and     idx, 0x3F               ; low 6 bits: winning lane index
        shr     len2, 6                 ; remaining bits: minimum length
        jz      len_is_0
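
        ; Decoding sketch for the packed minimum (illustrative values):
        ;
        ;     min word = 0x283          ; len 10 queued in lane 3
        ;     idx  = 0x283 & 0x3F = 3   ; lane to harvest when it finishes
        ;     len2 = 0x283 >> 6   = 10  ; amount every lane must now process
        ;
        ; len2 == 0 means the minimum-length lane already has nothing left
        ; (a tie completed by an earlier kernel run), so its job can be
        ; harvested without calling the kernel again.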

        vpand   ymm2, ymm2, [rel clear_low_6bits]       ; keep only the length bits of the min
        vpshufd ymm2, ymm2, 0                           ; broadcast them to every dword

        vpsubd  ymm0, ymm0, ymm2
        vpsubd  ymm1, ymm1, ymm2
        vpsubd  ymm5, ymm5, ymm2
        vpsubd  ymm6, ymm6, ymm2

        vmovdqu [state + _lens + 0*32], ymm0
        vmovdqu [state + _lens + 1*32], ymm1
        vmovdqu [state + _lens + 2*32], ymm5
        vmovdqu [state + _lens + 3*32], ymm6
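
        ; Each live lane's counter is (remaining_len << 6) | lane, so
        ; subtracting the broadcast (min_len << 6) leaves the lane bits
        ; intact while crediting every lane for the work the kernel is
        ; about to do; lane "idx" drops to zero remaining length:
        ;
        ;     e.g. 0x283 - 0x280 = 0x003   ; lane 3, nothing left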

        ; "state" and "args" are the same address, arg1
        ; len is arg2
        call    md5_mb_x16x2_avx512
        ; state and idx are intact

len_is_0:
        ; process completed job "idx"
        imul    lane_data, idx, _LANE_DATA_size
        lea     lane_data, [state + _ldata + lane_data]

        mov     job_rax, [lane_data + _job_in_lane]
        mov     lane, [state + _unused_lanes]
        mov     qword [lane_data + _job_in_lane], 0
        mov     dword [job_rax + _status], STS_COMPLETED

        shl     lane, 8                 ; make room for the freed lane id in byte 0
        or      lane, idx
        MEM_VPSLLDDQ (state + _unused_lanes), unused_lanes
        mov     [state + _unused_lanes], lane

        mov     DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
        sub     num_lanes_inuse, 1
        mov     [state + _num_lanes_inuse], DWORD(num_lanes_inuse)

        mov     dword [state + _lens + 4*idx], 0xFFFFFFFF ; max value, so a free lane never wins the min search

        ; gather the four digest words back from the transposed layout
        vmovd   xmm0, [state + _args_digest + 4*idx + 0*4*16*2]
        vpinsrd xmm0, [state + _args_digest + 4*idx + 1*4*16*2], 1
        vpinsrd xmm0, [state + _args_digest + 4*idx + 2*4*16*2], 2
        vpinsrd xmm0, [state + _args_digest + 4*idx + 3*4*16*2], 3

        vmovdqa [job_rax + _result_digest + 0*16], xmm0

return:
%ifidn __OUTPUT_FORMAT__, win64
        vmovdqa xmm6,  [rsp + 8*8 + 16*0]
        vmovdqa xmm7,  [rsp + 8*8 + 16*1]
        vmovdqa xmm8,  [rsp + 8*8 + 16*2]
        vmovdqa xmm9,  [rsp + 8*8 + 16*3]
        vmovdqa xmm10, [rsp + 8*8 + 16*4]
        vmovdqa xmm11, [rsp + 8*8 + 16*5]
        vmovdqa xmm12, [rsp + 8*8 + 16*6]
        vmovdqa xmm13, [rsp + 8*8 + 16*7]
        vmovdqa xmm14, [rsp + 8*8 + 16*8]
        vmovdqa xmm15, [rsp + 8*8 + 16*9]
        mov     rsi, [rsp + 8*6]
        mov     rdi, [rsp + 8*7]
%endif
        mov     rbx, [rsp + 8*0]
        mov     rbp, [rsp + 8*1]
        mov     r12, [rsp + 8*2]
        mov     r13, [rsp + 8*3]
        mov     r14, [rsp + 8*4]
        mov     r15, [rsp + 8*5]

        add     rsp, STACK_SPACE

        ret

return_null:
        xor     job_rax, job_rax
        jmp     return


section .data align=32

align 32
clear_low_6bits:
        dq 0x00000000FFFFFFC0, 0x0000000000000000
        dq 0x00000000FFFFFFC0, 0x0000000000000000

%else
%ifidn __OUTPUT_FORMAT__, win64
global no_md5_mb_mgr_submit_avx512
no_md5_mb_mgr_submit_avx512:
%endif
%endif ; HAVE_AS_KNOWS_AVX512