;; mb_mgr_hmac_flush_avx2.asm
;; Imported from intel-ipsec-mb (ceph/src/spdk/intel-ipsec-mb/avx2):
;; AVX2 HMAC-SHA1 multi-buffer manager flush routine.
1;;
2;; Copyright (c) 2012-2018, Intel Corporation
3;;
4;; Redistribution and use in source and binary forms, with or without
5;; modification, are permitted provided that the following conditions are met:
6;;
7;; * Redistributions of source code must retain the above copyright notice,
8;; this list of conditions and the following disclaimer.
9;; * Redistributions in binary form must reproduce the above copyright
10;; notice, this list of conditions and the following disclaimer in the
11;; documentation and/or other materials provided with the distribution.
12;; * Neither the name of Intel Corporation nor the names of its contributors
13;; may be used to endorse or promote products derived from this software
14;; without specific prior written permission.
15;;
16;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26;;
27
28%include "os.asm"
29%include "job_aes_hmac.asm"
30%include "mb_mgr_datastruct.asm"
31%include "reg_sizes.asm"
32;%define DO_DBGPRINT
33%include "dbgprint.asm"
34extern sha1_x8_avx2
35
section .data
default rel

align 16
;; vpshufb shuffle mask: byte-swaps each of the four 32-bit words in an
;; xmm register (host little-endian <-> big-endian SHA-1 digest order).
byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
	dq 0x0405060700010203, 0x0c0d0e0f08090a0b
;; Constant with a single 0x80 in the least-significant byte.
;; NOTE(review): not referenced in this file; presumably the SHA padding
;; start byte shared with the submit path - confirm before removing.
x80: ;ddq 0x00000000000000000000000000000080
	dq 0x0000000000000080, 0x0000000000000000
;; All-zero constant.  NOTE(review): also unreferenced here; kept for
;; parity with the companion submit/other-arch files.
x00: ;ddq 0x00000000000000000000000000000000
	dq 0x0000000000000000, 0x0000000000000000
;; Per-lane masks over the _lens array (eight 16-bit lengths in one xmm).
;; OR-ing mask I into the lengths forces lane I's length to 0xFFFF so an
;; idle lane can never be selected as the minimum by vphminposuw.
len_masks:
	;ddq 0x0000000000000000000000000000FFFF
	dq 0x000000000000FFFF, 0x0000000000000000
	;ddq 0x000000000000000000000000FFFF0000
	dq 0x00000000FFFF0000, 0x0000000000000000
	;ddq 0x00000000000000000000FFFF00000000
	dq 0x0000FFFF00000000, 0x0000000000000000
	;ddq 0x0000000000000000FFFF000000000000
	dq 0xFFFF000000000000, 0x0000000000000000
	;ddq 0x000000000000FFFF0000000000000000
	dq 0x0000000000000000, 0x000000000000FFFF
	;ddq 0x00000000FFFF00000000000000000000
	dq 0x0000000000000000, 0x00000000FFFF0000
	;ddq 0x0000FFFF000000000000000000000000
	dq 0x0000000000000000, 0x0000FFFF00000000
	;ddq 0xFFFF0000000000000000000000000000
	dq 0x0000000000000000, 0xFFFF000000000000
;; Lane indices 1..7 kept in memory: cmovne takes a reg/mem source, so the
;; non-null-job scan below loads the candidate index from these qwords.
lane_1: dq 1
lane_2: dq 2
lane_3: dq 3
lane_4: dq 4
lane_5: dq 5
lane_6: dq 6
lane_7: dq 7
section .text

;; Register aliases.  Several logical names deliberately share one physical
;; register; aliased names are never live at the same time:
;;   arg2 : job / len2 / extra_blocks / p
;;   rax  : job_rax / tmp1 / size_offset / tmp / start_offset
;;   r9   : unused_lanes / lane_data / tmp2
%if 1
%ifdef LINUX
%define arg1 rdi
%define arg2 rsi
%else
%define arg1 rcx
%define arg2 rdx
%endif

%define state arg1
%define job arg2
%define len2 arg2


; idx needs to be in rbx, rdi, rbp
%define idx rbp

%define unused_lanes r9
%define lane_data r9
%define tmp2 r9

%define job_rax rax
%define tmp1 rax
%define size_offset rax
%define tmp rax
%define start_offset rax

%define tmp3 arg1

%define extra_blocks arg2
%define p arg2

%define tmp4 r8

%endif

; Stack save area layout.
; we clobber rbp, called routine clobbers r12-r15
struc STACK
_gpr_save: resq 5	; rbp, r12, r13, r14, r15
_rsp_save: resq 1	; original rsp (stack is realigned to 32 bytes)
endstruc

; Token-paste helper used to generate per-lane labels inside %rep loops.
%define APPEND(a,b) a %+ b
; JOB* flush_job_hmac_avx2(MB_MGR_HMAC_SHA_1_OOO *state)
; arg 1 : rcx : state
;
; Flush path of the HMAC-SHA1 AVX2 multi-buffer manager: when no more jobs
; are being submitted, duplicate a valid lane's data pointer into every idle
; lane (idle lane lengths are forced to 0xFFFF), run sha1_x8_avx2 until the
; shortest in-flight lane completes, and return that lane's JOB* in rax
; (NULL if no lane holds a job).
MKGLOBAL(flush_job_hmac_avx2,function,internal)
flush_job_hmac_avx2:

	;; Prologue: remember the caller's rsp in rax, reserve the save area
	;; and realign to 32 bytes for AVX2 spills, then save the
	;; callee-saved GPRs clobbered here or by sha1_x8_avx2.
	mov	rax, rsp
	sub	rsp, STACK_size
	and	rsp, -32	; align stack to 32 byte boundary
	mov	[rsp + _gpr_save + 8*0], rbp
	mov	[rsp + _gpr_save + 8*1], r12
	mov	[rsp + _gpr_save + 8*2], r13
	mov	[rsp + _gpr_save + 8*3], r14
	mov	[rsp + _gpr_save + 8*4], r15
	mov	[rsp + _rsp_save], rax

	;; _unused_lanes is a nibble-stack of free lane ids.  NOTE(review):
	;; bit 32+3 being set is the "all 8 lanes free" sentinel (nothing to
	;; flush) - confirm encoding against the submit path.
	mov	unused_lanes, [state + _unused_lanes]
	bt	unused_lanes, 32+3
	jc	return_null

	; find a lane with a non-null job
	;; idx ends up holding the highest-numbered lane with a job
	;; (lane 0 if only lane 0 is busy, since idx starts at 0).
	xor	idx, idx
%assign I 1
%rep 7
	cmp	qword [state + _ldata + (I * _HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
	cmovne	idx, [rel APPEND(lane_,I)]
%assign I (I+1)
%endrep

copy_lane_data:
	; copy valid lane (idx) to empty lanes
	vmovdqa	xmm0, [state + _lens]
	mov	tmp, [state + _args_data_ptr + PTR_SZ*idx]

%assign I 0
%rep 8
	cmp	qword [state + _ldata + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
	jne	APPEND(skip_,I)
	;; Idle lane: point it at the valid lane's data and force its
	;; length to 0xFFFF so it never wins the minimum search below.
	mov	[state + _args_data_ptr + PTR_SZ*I], tmp
	vpor	xmm0, xmm0, [rel len_masks + 16*I]
APPEND(skip_,I):
%assign I (I+1)
%endrep

	vmovdqa	[state + _lens], xmm0

	;; Horizontal minimum over the eight 16-bit lane lengths: word 0 of
	;; xmm1 = min length, word 1 = index of that lane.
	vphminposuw	xmm1, xmm0
	vpextrw	DWORD(len2), xmm1, 0	; min value
	vpextrw	DWORD(idx), xmm1, 1	; min index (0...7)
	DBGPRINTL64 "FLUSH min_length", len2
	DBGPRINTL64 "FLUSH min_length index ", idx
	cmp	len2, 0
	je	len_is_0

	;; Broadcast the minimum and subtract it from every lane so each
	;; lane's remaining length is relative after hashing len2 blocks.
	vpbroadcastw	xmm1, xmm1
	DBGPRINTL_XMM "FLUSH lens after shuffle", xmm1

	vpsubw	xmm0, xmm0, xmm1
	vmovdqa	[state + _lens], xmm0
	DBGPRINTL_XMM "FLUSH lens immediately after min subtraction", xmm0

	; "state" and "args" are the same address, arg1
	; len is arg2
	call	sha1_x8_avx2
	; state and idx are intact

len_is_0:
	; process completed job "idx"
	imul	lane_data, idx, _HMAC_SHA1_LANE_DATA_size
	lea	lane_data, [state + _ldata + lane_data]
	mov	DWORD(extra_blocks), [lane_data + _extra_blocks]
	cmp	extra_blocks, 0
	jne	proc_extra_blocks
	cmp	dword [lane_data + _outer_done], 0
	jne	end_loop

proc_outer:
	;; Inner hash done: set up the outer HMAC hash.  The byte-swapped
	;; inner digest becomes the single outer block, and the lane's
	;; running digest is reloaded from the precomputed opad state.
	mov	dword [lane_data + _outer_done], 1
	mov	DWORD(size_offset), [lane_data + _size_offset]
	mov	qword [lane_data + _extra_block + size_offset], 0
	mov	word [state + _lens + 2*idx], 1	; outer hash is exactly one block
	lea	tmp, [lane_data + _outer_block]
	mov	job, [lane_data + _job_in_lane]
	mov	[state + _args_data_ptr + PTR_SZ*idx], tmp

	;; Gather lane idx's five digest words from the transposed
	;; _args_digest layout (one word per row) and byte-swap to
	;; big-endian before storing them as the outer block.
	vmovd	xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
	vpinsrd	xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], 1
	vpinsrd	xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], 2
	vpinsrd	xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], 3
	vpshufb	xmm0, xmm0, [rel byteswap]
	mov	DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
	bswap	DWORD(tmp)
	vmovdqa	[lane_data + _outer_block], xmm0
	mov	[lane_data + _outer_block + 4*4], DWORD(tmp)

	;; Reload the lane's digest with the precomputed opad state.
	mov	tmp, [job + _auth_key_xor_opad]
	vmovdqu	xmm0, [tmp]
	mov	DWORD(tmp), [tmp + 4*4]
	vmovd	[state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE], xmm0
	vpextrd	[state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], xmm0, 1
	vpextrd	[state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], xmm0, 2
	vpextrd	[state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], xmm0, 3
	mov	[state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE], DWORD(tmp)
	jmp	copy_lane_data

	align	16
proc_extra_blocks:
	;; Lane idx still has padding/tail blocks in its _extra_block
	;; buffer: queue them and go back to hashing.
	mov	DWORD(start_offset), [lane_data + _start_offset]
	mov	[state + _lens + 2*idx], WORD(extra_blocks)
	lea	tmp, [lane_data + _extra_block + start_offset]
	mov	[state + _args_data_ptr + PTR_SZ*idx], tmp
	mov	dword [lane_data + _extra_blocks], 0
	jmp	copy_lane_data

return_null:
	xor	job_rax, job_rax
	jmp	return

	align	16
end_loop:
	;; Job complete: detach it from the lane, mark its status, and push
	;; the lane id back onto the unused-lanes nibble stack.
	mov	job_rax, [lane_data + _job_in_lane]
	mov	qword [lane_data + _job_in_lane], 0
	or	dword [job_rax + _status], STS_COMPLETED_HMAC
	mov	unused_lanes, [state + _unused_lanes]
	shl	unused_lanes, 4 ;; a nibble
	or	unused_lanes, idx
	mov	[state + _unused_lanes], unused_lanes

	mov	p, [job_rax + _auth_tag_output]

	; copy 12 bytes
	;; First three big-endian digest words (truncated HMAC-SHA1-96 tag).
	;; r12-r14 are used as extra scratch here; they were saved in the
	;; prologue and are restored at "return".
	mov	DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
	mov	DWORD(tmp4), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE]
	mov	DWORD(r12), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE]
	bswap	DWORD(tmp2)
	bswap	DWORD(tmp4)
	bswap	DWORD(r12)
	mov	[p + 0*4], DWORD(tmp2)
	mov	[p + 1*4], DWORD(tmp4)
	mov	[p + 2*4], DWORD(r12)

	cmp	qword [job_rax + _auth_tag_output_len_in_bytes], 12
	je	return

	;; copy remaining 8 bytes to return 20 byte digest
	mov	DWORD(r13), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE]
	mov	DWORD(r14), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
	bswap	DWORD(r13)
	bswap	DWORD(r14)
	mov	[p + 3*SHA1_DIGEST_WORD_SIZE], DWORD(r13)
	mov	[p + 4*SHA1_DIGEST_WORD_SIZE], DWORD(r14)

return:
	;; Clear upper ymm state before returning to SSE/C code (ABI),
	;; restore callee-saved GPRs and the caller's rsp.
	vzeroupper
	mov	rbp, [rsp + _gpr_save + 8*0]
	mov	r12, [rsp + _gpr_save + 8*1]
	mov	r13, [rsp + _gpr_save + 8*2]
	mov	r14, [rsp + _gpr_save + 8*3]
	mov	r15, [rsp + _gpr_save + 8*4]
	mov	rsp, [rsp + _rsp_save]
	ret
277
%ifdef LINUX
;; Mark the object as not requiring an executable stack (GNU toolchains).
section .note.GNU-stack noalloc noexec nowrite progbits
%endif