;; Origin: intel-ipsec-mb, sse/mb_mgr_hmac_md5_flush_sse.asm
;; Vendored under ceph/src/spdk (imported with the Ceph Pacific 16.2.2 update).
1 ;;
2 ;; Copyright (c) 2012-2018, Intel Corporation
3 ;;
4 ;; Redistribution and use in source and binary forms, with or without
5 ;; modification, are permitted provided that the following conditions are met:
6 ;;
7 ;; * Redistributions of source code must retain the above copyright notice,
8 ;; this list of conditions and the following disclaimer.
9 ;; * Redistributions in binary form must reproduce the above copyright
10 ;; notice, this list of conditions and the following disclaimer in the
11 ;; documentation and/or other materials provided with the distribution.
12 ;; * Neither the name of Intel Corporation nor the names of its contributors
13 ;; may be used to endorse or promote products derived from this software
14 ;; without specific prior written permission.
15 ;;
16 ;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 ;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 ;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 ;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 ;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 ;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 ;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 ;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 ;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 ;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 ;;
27
28 %include "include/os.asm"
29 %include "job_aes_hmac.asm"
30 %include "mb_mgr_datastruct.asm"
31 %include "include/reg_sizes.asm"
32
33 extern md5_x4x2_sse
34
35 section .data
36 default rel
37 align 16
38 dupw: ;ddq 0x01000100010001000100010001000100
39 dq 0x0100010001000100, 0x0100010001000100
40 len_masks:
41 ;ddq 0x0000000000000000000000000000FFFF
42 dq 0x000000000000FFFF, 0x0000000000000000
43 ;ddq 0x000000000000000000000000FFFF0000
44 dq 0x00000000FFFF0000, 0x0000000000000000
45 ;ddq 0x00000000000000000000FFFF00000000
46 dq 0x0000FFFF00000000, 0x0000000000000000
47 ;ddq 0x0000000000000000FFFF000000000000
48 dq 0xFFFF000000000000, 0x0000000000000000
49 ;ddq 0x000000000000FFFF0000000000000000
50 dq 0x0000000000000000, 0x000000000000FFFF
51 ;ddq 0x00000000FFFF00000000000000000000
52 dq 0x0000000000000000, 0x00000000FFFF0000
53 ;ddq 0x0000FFFF000000000000000000000000
54 dq 0x0000000000000000, 0x0000FFFF00000000
55 ;ddq 0xFFFF0000000000000000000000000000
56 dq 0x0000000000000000, 0xFFFF000000000000
57 one: dq 1
58 two: dq 2
59 three: dq 3
60 four: dq 4
61 five: dq 5
62 six: dq 6
63 seven: dq 7
64
65 section .text
66
67 %if 1
68 %ifdef LINUX
69 %define arg1 rdi
70 %define arg2 rsi
71 %else
72 %define arg1 rcx
73 %define arg2 rdx
74 %endif
75
76 %define state arg1
77 %define job arg2
78 %define len2 arg2
79
80
81 ; idx needs to be in rbp
82 %define idx rbp
83
84 ; unused_lanes must be in rax-rdx
85 %define unused_lanes rbx
86 %define lane_data rbx
87 %define tmp2 rbx
88
89 %define job_rax rax
90 %define tmp1 rax
91 %define size_offset rax
92 %define tmp rax
93 %define start_offset rax
94
95 %define tmp3 arg1
96
97 %define extra_blocks arg2
98 %define p arg2
99
100 %define tmp4 r8
101 %define tmp5 r9
102
103 %endif
104
105 ; This routine and/or the called routine clobbers all GPRs
106 struc STACK
107 _gpr_save: resq 8
108 _rsp_save: resq 1
109 endstruc
110
111 %define APPEND(a,b) a %+ b
112
113 ; JOB* flush_job_hmac_md5_sse(MB_MGR_HMAC_MD5_OOO *state)
114 ; arg 1 : rcx : state
115 MKGLOBAL(flush_job_hmac_md5_sse,function,internal)
116 flush_job_hmac_md5_sse:
117
118 mov rax, rsp
119 sub rsp, STACK_size
120 and rsp, -16
121
122 mov [rsp + _gpr_save + 8*0], rbx
123 mov [rsp + _gpr_save + 8*1], rbp
124 mov [rsp + _gpr_save + 8*2], r12
125 mov [rsp + _gpr_save + 8*3], r13
126 mov [rsp + _gpr_save + 8*4], r14
127 mov [rsp + _gpr_save + 8*5], r15
128 %ifndef LINUX
129 mov [rsp + _gpr_save + 8*6], rsi
130 mov [rsp + _gpr_save + 8*7], rdi
131 %endif
132 mov [rsp + _rsp_save], rax ; original SP
133
134 mov unused_lanes, [state + _unused_lanes_md5]
135 bt unused_lanes, 32+3
136 jc return_null
137
138 ; find a lane with a non-null job
139 xor idx, idx
140 cmp qword [state + _ldata_md5 + 1 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
141 cmovne idx, [rel one]
142 cmp qword [state + _ldata_md5 + 2 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
143 cmovne idx, [rel two]
144 cmp qword [state + _ldata_md5 + 3 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
145 cmovne idx, [rel three]
146 cmp qword [state + _ldata_md5 + 4 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
147 cmovne idx, [rel four]
148 cmp qword [state + _ldata_md5 + 5 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
149 cmovne idx, [rel five]
150 cmp qword [state + _ldata_md5 + 6 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
151 cmovne idx, [rel six]
152 cmp qword [state + _ldata_md5 + 7 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane],0
153 cmovne idx, [rel seven]
154
155 copy_lane_data:
156 ; copy good lane (idx) to empty lanes
157 movdqa xmm0, [state + _lens_md5]
158 mov tmp, [state + _args_data_ptr_md5 + PTR_SZ*idx]
159
160 %assign I 0
161 %rep 8
162 cmp qword [state + _ldata_md5 + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
163 jne APPEND(skip_,I)
164 mov [state + _args_data_ptr_md5 + PTR_SZ*I], tmp
165 por xmm0, [rel len_masks + 16*I]
166 APPEND(skip_,I):
167 %assign I (I+1)
168 %endrep
169
170 movdqa [state + _lens_md5], xmm0
171
172 phminposuw xmm1, xmm0
173 pextrw len2, xmm1, 0 ; min value
174 pextrw idx, xmm1, 1 ; min index (0...3)
175 cmp len2, 0
176 je len_is_0
177
178 pshufb xmm1, [rel dupw] ; duplicate words across all lanes
179 psubw xmm0, xmm1
180 movdqa [state + _lens_md5], xmm0
181
182 ; "state" and "args" are the same address, arg1
183 ; len is arg2
184 call md5_x4x2_sse
185 ; state and idx are intact
186
187 len_is_0:
188 ; process completed job "idx"
189 imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
190 lea lane_data, [state + _ldata_md5 + lane_data]
191 mov DWORD(extra_blocks), [lane_data + _extra_blocks]
192 cmp extra_blocks, 0
193 jne proc_extra_blocks
194 cmp dword [lane_data + _outer_done], 0
195 jne end_loop
196
197 proc_outer:
198 mov dword [lane_data + _outer_done], 1
199 mov DWORD(size_offset), [lane_data + _size_offset]
200 mov qword [lane_data + _extra_block + size_offset], 0
201 mov word [state + _lens_md5 + 2*idx], 1
202 lea tmp, [lane_data + _outer_block]
203 mov job, [lane_data + _job_in_lane]
204 mov [state + _args_data_ptr_md5 + PTR_SZ*idx], tmp
205
206 movd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE]
207 pinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], 1
208 pinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], 2
209 pinsrd xmm0, [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], 3
210 ; pshufb xmm0, [byteswap wrt rip]
211 movdqa [lane_data + _outer_block], xmm0
212
213 mov tmp, [job + _auth_key_xor_opad]
214 movdqu xmm0, [tmp]
215 movd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE], xmm0
216 pextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE], xmm0, 1
217 pextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE], xmm0, 2
218 pextrd [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE], xmm0, 3
219 jmp copy_lane_data
220
221 align 16
222 proc_extra_blocks:
223 mov DWORD(start_offset), [lane_data + _start_offset]
224 mov [state + _lens_md5 + 2*idx], WORD(extra_blocks)
225 lea tmp, [lane_data + _extra_block + start_offset]
226 mov [state + _args_data_ptr_md5 + PTR_SZ*idx], tmp
227 mov dword [lane_data + _extra_blocks], 0
228 jmp copy_lane_data
229
230 return_null:
231 xor job_rax, job_rax
232 jmp return
233
234 align 16
235 end_loop:
236 mov job_rax, [lane_data + _job_in_lane]
237 mov qword [lane_data + _job_in_lane], 0
238 or dword [job_rax + _status], STS_COMPLETED_HMAC
239 mov unused_lanes, [state + _unused_lanes_md5]
240 shl unused_lanes, 4
241 or unused_lanes, idx
242 mov [state + _unused_lanes_md5], unused_lanes
243
244 mov p, [job_rax + _auth_tag_output]
245
246 ; copy 12 bytes
247 mov DWORD(tmp2), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 0*MD5_DIGEST_ROW_SIZE]
248 mov DWORD(tmp4), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 1*MD5_DIGEST_ROW_SIZE]
249 mov DWORD(tmp5), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 2*MD5_DIGEST_ROW_SIZE]
250 ; bswap DWORD(tmp2)
251 ; bswap DWORD(tmp4)
252 ; bswap DWORD(tmp3)
253 mov [p + 0*4], DWORD(tmp2)
254 mov [p + 1*4], DWORD(tmp4)
255 mov [p + 2*4], DWORD(tmp5)
256
257 cmp DWORD [job_rax + _auth_tag_output_len_in_bytes], 12
258 je clear_ret
259
260 ; copy 16 bytes
261 mov DWORD(tmp5), [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*idx + 3*MD5_DIGEST_ROW_SIZE]
262 mov [p + 3*4], DWORD(tmp5)
263
264 clear_ret:
265
266 %ifdef SAFE_DATA
267 pxor xmm0, xmm0
268
269 ;; Clear digest (16B), outer_block (16B) and extra_block (64B)
270 ;; of returned job and NULL jobs
271 %assign I 0
272 %rep 8
273 cmp qword [state + _ldata_md5 + (I*_HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
274 jne APPEND(skip_clear_,I)
275
276 ;; Clear digest (16 bytes)
277 %assign J 0
278 %rep 4
279 mov dword [state + _args_digest_md5 + MD5_DIGEST_WORD_SIZE*I + J*MD5_DIGEST_ROW_SIZE], 0
280 %assign J (J+1)
281 %endrep
282
283 lea lane_data, [state + _ldata_md5 + (I*_HMAC_SHA1_LANE_DATA_size)]
284 ;; Clear first 64 bytes of extra_block
285 %assign offset 0
286 %rep 4
287 movdqa [lane_data + _extra_block + offset], xmm0
288 %assign offset (offset + 16)
289 %endrep
290
291 ;; Clear first 16 bytes of outer_block
292 movdqa [lane_data + _outer_block], xmm0
293
294 APPEND(skip_clear_,I):
295 %assign I (I+1)
296 %endrep
297
298 %endif ;; SAFE_DATA
299
300 return:
301
302 mov rbx, [rsp + _gpr_save + 8*0]
303 mov rbp, [rsp + _gpr_save + 8*1]
304 mov r12, [rsp + _gpr_save + 8*2]
305 mov r13, [rsp + _gpr_save + 8*3]
306 mov r14, [rsp + _gpr_save + 8*4]
307 mov r15, [rsp + _gpr_save + 8*5]
308 %ifndef LINUX
309 mov rsi, [rsp + _gpr_save + 8*6]
310 mov rdi, [rsp + _gpr_save + 8*7]
311 %endif
312 mov rsp, [rsp + _rsp_save] ; original SP
313
314 ret
315
316 %ifdef LINUX
317 section .note.GNU-stack noalloc noexec nowrite progbits
318 %endif