;;
;; Copyright (c) 2012-2018, Intel Corporation
;;
;; Redistribution and use in source and binary forms, with or without
;; modification, are permitted provided that the following conditions are met:
;;
;;     * Redistributions of source code must retain the above copyright notice,
;;       this list of conditions and the following disclaimer.
;;     * Redistributions in binary form must reproduce the above copyright
;;       notice, this list of conditions and the following disclaimer in the
;;       documentation and/or other materials provided with the distribution.
;;     * Neither the name of Intel Corporation nor the names of its contributors
;;       may be used to endorse or promote products derived from this software
;;       without specific prior written permission.
;;
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;

%include "include/os.asm"
%include "job_aes_hmac.asm"
%include "mb_mgr_datastruct.asm"
%include "include/reg_sizes.asm"
%include "include/memcpy.asm"
%include "include/const.inc"

extern sha512_x2_avx

section .data
default rel

align 16
byteswap:       ;ddq 0x08090a0b0c0d0e0f0001020304050607
        dq 0x0001020304050607, 0x08090a0b0c0d0e0f
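        ;; the mask above reverses the byte order of each 64-bit word when
        ;; applied with vpshufb, converting the little-endian digest words
        ;; to SHA-512's big-endian representation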

section .text

%ifndef FUNC
%define FUNC submit_job_hmac_sha_512_avx
%define SHA_X_DIGEST_SIZE 512
%endif

%if 1
%ifdef LINUX
%define arg1    rdi
%define arg2    rsi
%define reg3    rcx
%define reg4    rdx
%else
%define arg1    rcx
%define arg2    rdx
%define reg3    rdi
%define reg4    rsi
%endif

%define state   arg1
%define job     arg2
%define len2    arg2


; idx needs to be in rbx, rbp, r12-r15
%define last_len        rbp
%define idx             rbp

%define p               r11
%define start_offset    r11

%define unused_lanes    rbx
%define tmp4            rbx

%define job_rax         rax
%define len             rax

%define size_offset     reg3
%define tmp2            reg3

%define lane            reg4
%define tmp3            reg4

%define extra_blocks    r8

%define tmp             r9
%define p2              r9

%define lane_data       r10

%endif

; This routine clobbers rbx, rbp, rsi, rdi
struc STACK
_gpr_save:      resq    4
_rsp_save:      resq    1
endstruc

; JOB* FUNC(MB_MGR_HMAC_sha_512_OOO *state, JOB_AES_HMAC *job)
; arg 1 : rcx : state
; arg 2 : rdx : job
MKGLOBAL(FUNC,function,internal)
FUNC:

        mov     rax, rsp
        sub     rsp, STACK_size
        and     rsp, -16

        mov     [rsp + _gpr_save + 8*0], rbx
        mov     [rsp + _gpr_save + 8*1], rbp
%ifndef LINUX
        mov     [rsp + _gpr_save + 8*2], rsi
        mov     [rsp + _gpr_save + 8*3], rdi
%endif
        mov     [rsp + _rsp_save], rax  ; original SP

        mov     unused_lanes, [state + _unused_lanes_sha512]
        movzx   lane, BYTE(unused_lanes)
        shr     unused_lanes, 8
        imul    lane_data, lane, _SHA512_LANE_DATA_size
        lea     lane_data, [state + _ldata_sha512 + lane_data]
        mov     [state + _unused_lanes_sha512], unused_lanes
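        ;; _unused_lanes_sha512 is a byte-packed stack of free lane ids;
        ;; the pop above claims the bottom byte for this job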
        mov     len, [job + _msg_len_to_hash_in_bytes]
        mov     tmp, len
        shr     tmp, 7  ; divide by 128, len in terms of blocks

        mov     [lane_data + _job_in_lane_sha512], job
        mov     dword [lane_data + _outer_done_sha512], 0

        vmovdqa xmm0, [state + _lens_sha512]
        XVPINSRW xmm0, xmm1, p, lane, tmp, scale_x16
        vmovdqa [state + _lens_sha512], xmm0
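        ;; the lens vector holds each lane's outstanding block count in a
        ;; packed 16-bit slot; XVPINSRW writes just this lane's slot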

        mov     last_len, len
        and     last_len, 127
        lea     extra_blocks, [last_len + 17 + 127]
        shr     extra_blocks, 7
        mov     [lane_data + _extra_blocks_sha512], DWORD(extra_blocks)
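        ;; extra_blocks counts the blocks that hold the message tail plus
        ;; padding: 17 = one 0x80 pad byte + the 16-byte SHA-512 length
        ;; field; adding 127 rounds up to whole 128-byte blocks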

        mov     p, [job + _src]
        add     p, [job + _hash_start_src_offset_in_bytes]
        mov     [state + _args_data_ptr_sha512 + PTR_SZ*lane], p

        cmp     len, 128
        jb      copy_lt128

fast_copy:
        add     p, len
%assign I 0
%rep 2
        vmovdqu xmm0, [p - 128 + I*4*16 + 0*16]
        vmovdqu xmm1, [p - 128 + I*4*16 + 1*16]
        vmovdqu xmm2, [p - 128 + I*4*16 + 2*16]
        vmovdqu xmm3, [p - 128 + I*4*16 + 3*16]
        vmovdqa [lane_data + _extra_block_sha512 + I*4*16 + 0*16], xmm0
        vmovdqa [lane_data + _extra_block_sha512 + I*4*16 + 1*16], xmm1
        vmovdqa [lane_data + _extra_block_sha512 + I*4*16 + 2*16], xmm2
        vmovdqa [lane_data + _extra_block_sha512 + I*4*16 + 3*16], xmm3
%assign I (I+1)
%endrep

end_fast_copy:

        mov     size_offset, extra_blocks
        shl     size_offset, 7
        sub     size_offset, last_len
        add     size_offset, 128-8
        mov     [lane_data + _size_offset_sha512], DWORD(size_offset)
        mov     start_offset, 128
        sub     start_offset, last_len
        mov     [lane_data + _start_offset_sha512], DWORD(start_offset)
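        ;; the message tail is stored so it ends at offset 128 of the extra
        ;; block: start_offset (128 - last_len) is where hashing resumes,
        ;; and size_offset points at the final 8 bytes of the last padded
        ;; block, where the big-endian bit count is written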

        lea     tmp, [8*128 + 8*len]
        bswap   tmp
        mov     [lane_data + _extra_block_sha512 + size_offset], tmp
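        ;; total hashed length in bits = (128-byte ipad block + message) * 8,
        ;; stored big-endian; the upper 8 bytes of the 16-byte SHA-512 length
        ;; field remain zero as part of the block's pre-set padding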

        mov     tmp, [job + _auth_key_xor_ipad]

%assign I 0
%rep 4
        vmovdqu xmm0, [tmp + I * 2 * 8]
        vmovq   [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*lane + (2*I)*SHA512_DIGEST_ROW_SIZE], xmm0
        vpextrq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*lane + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], xmm0, 1
%assign I (I+1)
%endrep
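        ;; the loop above seeds the lane's digest with the precomputed
        ;; (key XOR ipad) state; the x2 core keeps digests row-interleaved,
        ;; one 64-bit word per lane in each of the eight rows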

        test    len, ~127
        jnz     ge128_bytes

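        ;; fewer than 128 bytes: the whole padded message already sits in
        ;; the extra block, so schedule those block(s) directly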
lt128_bytes:
        vmovdqa xmm0, [state + _lens_sha512]
        XVPINSRW xmm0, xmm1, tmp, lane, extra_blocks, scale_x16
        vmovdqa [state + _lens_sha512], xmm0

        lea     tmp, [lane_data + _extra_block_sha512 + start_offset]
        mov     [state + _args_data_ptr_sha512 + PTR_SZ*lane], tmp      ;; point the lane at the padded extra block
        mov     dword [lane_data + _extra_blocks_sha512], 0

ge128_bytes:
        cmp     unused_lanes, 0xff
        jne     return_null
        jmp     start_loop

        align   16
start_loop:
        ; Find min length
        vmovdqa xmm0, [state + _lens_sha512]
        vphminposuw xmm1, xmm0
        vpextrw DWORD(len2), xmm1, 0    ; min value
        vpextrw DWORD(idx), xmm1, 1     ; min index (0...1)
        cmp     len2, 0
        je      len_is_0

        vpshuflw xmm1, xmm1, 0xA0
        vpsubw  xmm0, xmm0, xmm1
        vmovdqa [state + _lens_sha512], xmm0
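        ;; scheduler: vphminposuw picks the lane with the fewest blocks
        ;; remaining; that count is broadcast and subtracted from every
        ;; lane, then the two-lane core runs for exactly that many blocks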

        ; "state" and "args" are the same address, arg1
        ; len is arg2
        call    sha512_x2_avx
        ; state and idx are intact

len_is_0:
        ; process completed job "idx"
        imul    lane_data, idx, _SHA512_LANE_DATA_size
        lea     lane_data, [state + _ldata_sha512 + lane_data]
        mov     DWORD(extra_blocks), [lane_data + _extra_blocks_sha512]
        cmp     extra_blocks, 0
        jne     proc_extra_blocks
        cmp     dword [lane_data + _outer_done_sha512], 0
        jne     end_loop

proc_outer:
        mov     dword [lane_data + _outer_done_sha512], 1
        mov     DWORD(size_offset), [lane_data + _size_offset_sha512]
        mov     qword [lane_data + _extra_block_sha512 + size_offset], 0

        vmovdqa xmm0, [state + _lens_sha512]
        XVPINSRW xmm0, xmm1, tmp, idx, 1, scale_x16
        vmovdqa [state + _lens_sha512], xmm0

        lea     tmp, [lane_data + _outer_block_sha512]
        mov     job, [lane_data + _job_in_lane_sha512]
        mov     [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp

%assign I 0
%rep (SHA_X_DIGEST_SIZE / (8 * 16))
        vmovq   xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*I*SHA512_DIGEST_ROW_SIZE]
        vpinsrq xmm0, [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], 1
        vpshufb xmm0, [rel byteswap]
        vmovdqa [lane_data + _outer_block_sha512 + I * 16], xmm0
%assign I (I+1)
%endrep
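        ;; the byte-swapped inner digest written above is the input of the
        ;; outer hash; the outer block's 0x80/length padding is prepared
        ;; outside this routine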

        mov     tmp, [job + _auth_key_xor_opad]
%assign I 0
%rep 4
        vmovdqu xmm0, [tmp + I * 16]
        vmovq   [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*I*SHA512_DIGEST_ROW_SIZE], xmm0
        vpextrq [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + (2*I + 1)*SHA512_DIGEST_ROW_SIZE], xmm0, 1
%assign I (I+1)
%endrep
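        ;; reload the digest with the precomputed (key XOR opad) state so
        ;; the next pass through the core computes the outer hash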

        jmp     start_loop

        align   16
proc_extra_blocks:
        mov     DWORD(start_offset), [lane_data + _start_offset_sha512]

        vmovdqa xmm0, [state + _lens_sha512]
        XVPINSRW xmm0, xmm1, tmp, idx, extra_blocks, scale_x16
        vmovdqa [state + _lens_sha512], xmm0

        lea     tmp, [lane_data + _extra_block_sha512 + start_offset]
        mov     [state + _args_data_ptr_sha512 + PTR_SZ*idx], tmp       ;; idx is the index of the shortest-length message
        mov     dword [lane_data + _extra_blocks_sha512], 0
        jmp     start_loop

        align   16
copy_lt128:
        ;; less than one message block of data
        ;; copy the tail into the extra block so it ends at offset 128,
        ;; just before the pre-populated 0x80 pad byte
        lea     p2, [lane_data + _extra_block_sha512 + 128]
        sub     p2, len
        memcpy_avx_128_1 p2, p, len, tmp4, tmp2, xmm0, xmm1, xmm2, xmm3
        mov     unused_lanes, [state + _unused_lanes_sha512]
        jmp     end_fast_copy

return_null:
        xor     job_rax, job_rax
        jmp     return

        align   16
end_loop:
        mov     job_rax, [lane_data + _job_in_lane_sha512]
        mov     unused_lanes, [state + _unused_lanes_sha512]
        mov     qword [lane_data + _job_in_lane_sha512], 0
        or      dword [job_rax + _status], STS_COMPLETED_HMAC
        shl     unused_lanes, 8
        or      unused_lanes, idx
        mov     [state + _unused_lanes_sha512], unused_lanes
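        ;; push the finished lane back onto the byte-packed free-lane
        ;; stack (mirrors the pop in the submit path above)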

        mov     p, [job_rax + _auth_tag_output]

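        ;; tags of the default truncated size (32 bytes for SHA-512, 24 for
        ;; SHA-384) take the fast path below; any other requested size falls
        ;; through to copy_full_digest, which writes the whole digest. In
        ;; both cases the words are byte-swapped to big-endian on output.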
%if (SHA_X_DIGEST_SIZE != 384)
        cmp     qword [job_rax + _auth_tag_output_len_in_bytes], 32
        jne     copy_full_digest
%else
        cmp     qword [job_rax + _auth_tag_output_len_in_bytes], 24
        jne     copy_full_digest
%endif
        ;; copy 32 bytes for SHA-512 / 24 bytes for SHA-384
        mov     QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
        mov     QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
        mov     QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
%if (SHA_X_DIGEST_SIZE != 384)
        mov     QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
%endif
        bswap   QWORD(tmp)
        bswap   QWORD(tmp2)
        bswap   QWORD(tmp3)
%if (SHA_X_DIGEST_SIZE != 384)
        bswap   QWORD(tmp4)
%endif
        mov     [p + 0*8], QWORD(tmp)
        mov     [p + 1*8], QWORD(tmp2)
        mov     [p + 2*8], QWORD(tmp3)
%if (SHA_X_DIGEST_SIZE != 384)
        mov     [p + 3*8], QWORD(tmp4)
%endif
        jmp     clear_ret

copy_full_digest:
        ;; copy 64 bytes for SHA-512 / 48 bytes for SHA-384
        mov     QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 0*SHA512_DIGEST_ROW_SIZE]
        mov     QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 1*SHA512_DIGEST_ROW_SIZE]
        mov     QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 2*SHA512_DIGEST_ROW_SIZE]
        mov     QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 3*SHA512_DIGEST_ROW_SIZE]
        bswap   QWORD(tmp)
        bswap   QWORD(tmp2)
        bswap   QWORD(tmp3)
        bswap   QWORD(tmp4)
        mov     [p + 0*8], QWORD(tmp)
        mov     [p + 1*8], QWORD(tmp2)
        mov     [p + 2*8], QWORD(tmp3)
        mov     [p + 3*8], QWORD(tmp4)

        mov     QWORD(tmp), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 4*SHA512_DIGEST_ROW_SIZE]
        mov     QWORD(tmp2), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 5*SHA512_DIGEST_ROW_SIZE]
%if (SHA_X_DIGEST_SIZE != 384)
        mov     QWORD(tmp3), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 6*SHA512_DIGEST_ROW_SIZE]
        mov     QWORD(tmp4), [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 7*SHA512_DIGEST_ROW_SIZE]
%endif
        bswap   QWORD(tmp)
        bswap   QWORD(tmp2)
%if (SHA_X_DIGEST_SIZE != 384)
        bswap   QWORD(tmp3)
        bswap   QWORD(tmp4)
%endif
        mov     [p + 4*8], QWORD(tmp)
        mov     [p + 5*8], QWORD(tmp2)
%if (SHA_X_DIGEST_SIZE != 384)
        mov     [p + 6*8], QWORD(tmp3)
        mov     [p + 7*8], QWORD(tmp4)
%endif

clear_ret:

%ifdef SAFE_DATA
        ;; Clear digest (48B/64B), outer_block (48B/64B) and extra_block (128B) of returned job
%assign J 0
%rep 6
        mov     qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + J*SHA512_DIGEST_ROW_SIZE], 0
%assign J (J+1)
%endrep
%if (SHA_X_DIGEST_SIZE != 384)
        mov     qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 6*SHA512_DIGEST_ROW_SIZE], 0
        mov     qword [state + _args_digest_sha512 + SHA512_DIGEST_WORD_SIZE*idx + 7*SHA512_DIGEST_ROW_SIZE], 0
%endif

        vpxor   xmm0, xmm0
        imul    lane_data, idx, _SHA512_LANE_DATA_size
        lea     lane_data, [state + _ldata_sha512 + lane_data]
        ;; Clear first 128 bytes of extra_block
%assign offset 0
%rep 8
        vmovdqa [lane_data + _extra_block_sha512 + offset], xmm0
%assign offset (offset + 16)
%endrep

        ;; Clear first 48 bytes (SHA-384) or 64 bytes (SHA-512) of outer_block
        vmovdqa [lane_data + _outer_block_sha512], xmm0
        vmovdqa [lane_data + _outer_block_sha512 + 16], xmm0
        vmovdqa [lane_data + _outer_block_sha512 + 32], xmm0
%if (SHA_X_DIGEST_SIZE != 384)
        vmovdqa [lane_data + _outer_block_sha512 + 48], xmm0
%endif
%endif ;; SAFE_DATA

return:
        mov     rbx, [rsp + _gpr_save + 8*0]
        mov     rbp, [rsp + _gpr_save + 8*1]
%ifndef LINUX
        mov     rsi, [rsp + _gpr_save + 8*2]
        mov     rdi, [rsp + _gpr_save + 8*3]
%endif
        mov     rsp, [rsp + _rsp_save]  ; original SP

        ret

%ifdef LINUX
section .note.GNU-stack noalloc noexec nowrite progbits
%endif