]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/intel-ipsec-mb/avx2/mb_mgr_hmac_sha_256_flush_avx2.asm
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / spdk / intel-ipsec-mb / avx2 / mb_mgr_hmac_sha_256_flush_avx2.asm
1 ;;
2 ;; Copyright (c) 2012-2018, Intel Corporation
3 ;;
4 ;; Redistribution and use in source and binary forms, with or without
5 ;; modification, are permitted provided that the following conditions are met:
6 ;;
7 ;; * Redistributions of source code must retain the above copyright notice,
8 ;; this list of conditions and the following disclaimer.
9 ;; * Redistributions in binary form must reproduce the above copyright
10 ;; notice, this list of conditions and the following disclaimer in the
11 ;; documentation and/or other materials provided with the distribution.
12 ;; * Neither the name of Intel Corporation nor the names of its contributors
13 ;; may be used to endorse or promote products derived from this software
14 ;; without specific prior written permission.
15 ;;
16 ;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 ;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 ;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 ;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 ;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 ;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 ;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 ;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 ;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 ;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 ;;
27
28 %include "os.asm"
29 %include "job_aes_hmac.asm"
30 %include "mb_mgr_datastruct.asm"
31 %include "reg_sizes.asm"
32
33 extern sha256_oct_avx2
34
35 section .data
36 default rel
37 align 16
; vpshufb shuffle mask: byte-swaps each 32-bit dword within the xmm register
; (little-endian digest words -> big-endian wire format)
38 byteswap: ;ddq 0x0c0d0e0f08090a0b0405060700010203
39 dq 0x0405060700010203, 0x0c0d0e0f08090a0b
; Per-lane 16-bit masks (8 lanes x one word each in the packed length vector).
; OR-ing a lane's mask into _lens_sha256 forces that lane's length to 0xFFFF,
; so the vphminposuw minimum search in the code below never selects an
; empty lane.
40 len_masks:
41 ;ddq 0x0000000000000000000000000000FFFF
42 dq 0x000000000000FFFF, 0x0000000000000000
43 ;ddq 0x000000000000000000000000FFFF0000
44 dq 0x00000000FFFF0000, 0x0000000000000000
45 ;ddq 0x00000000000000000000FFFF00000000
46 dq 0x0000FFFF00000000, 0x0000000000000000
47 ;ddq 0x0000000000000000FFFF000000000000
48 dq 0xFFFF000000000000, 0x0000000000000000
49 ;ddq 0x000000000000FFFF0000000000000000
50 dq 0x0000000000000000, 0x000000000000FFFF
51 ;ddq 0x00000000FFFF00000000000000000000
52 dq 0x0000000000000000, 0x00000000FFFF0000
53 ;ddq 0x0000FFFF000000000000000000000000
54 dq 0x0000000000000000, 0x0000FFFF00000000
55 ;ddq 0xFFFF0000000000000000000000000000
56 dq 0x0000000000000000, 0xFFFF000000000000
; qword lane indices 1..7, used as memory source operands for cmovne when
; scanning for a lane that still holds a job (index 0 is the xor'd default,
; so no constant is needed for lane 0)
57 lane_1: dq 1
58 lane_2: dq 2
59 lane_3: dq 3
60 lane_4: dq 4
61 lane_5: dq 5
62 lane_6: dq 6
63 lane_7: dq 7
64
65 section .text
66 
; Entry point name defaults to the SHA-256 variant; a build may predefine
; FUNC (together with SHA224, tested later in this file) to reuse this
; source for the SHA-224 flush routine.
67 %ifndef FUNC
68 %define FUNC flush_job_hmac_sha_256_avx2
69 %endif
70 
71 %if 1
; first/second integer argument registers per calling convention
; (System V AMD64 on LINUX, Microsoft x64 otherwise); reg3 is a scratch
; register chosen from whichever convention's volatile set
72 %ifdef LINUX
73 %define arg1 rdi
74 %define arg2 rsi
75 %define reg3 rdx
76 %else
77 %define arg1 rcx
78 %define arg2 rdx
79 %define reg3 rsi
80 %endif
81 
; NOTE: many of the names below alias the same physical register; the
; aliases are never live at the same time. In particular arg2 is reused
; as job/len2/extra_blocks/p at different phases of the routine.
82 %define state arg1
83 %define job arg2
84 %define len2 arg2
85 
86 
87 ; idx needs to be in rbp, r15
88 %define idx rbp
89 
; r10: lane free-list early on, scratch (tmp5) at tag-output time
90 %define unused_lanes r10
91 %define tmp5 r10
92 
; rbx: lane data pointer, also scratch (tmp2) at tag-output time
93 %define lane_data rbx
94 %define tmp2 rbx
95 
; rax: return value, plus several mutually-exclusive scratch roles
96 %define job_rax rax
97 %define tmp1 rax
98 %define size_offset rax
99 %define start_offset rax
100 
101 %define tmp3 arg1
102 
103 %define extra_blocks arg2
104 %define p arg2
105 
106 %define tmp4 reg3
107 %define tmp r9
108 %endif
109 
110 ; we clobber rsi, rbp; called routine also clobbers rbx, rdi, r12, r13, r14
; stack frame: 7 callee-saved GPR slots plus the caller's original rsp
; (rsp itself is 32-byte aligned in the prologue before this frame is used)
111 struc STACK
112 _gpr_save: resq 7
113 _rsp_save: resq 1
114 endstruc
115 
; token-pasting helper for generated labels (skip_0..skip_7, lane_1..lane_7)
116 %define APPEND(a,b) a %+ b
117
118 ; JOB* FUNC(MB_MGR_HMAC_SHA_256_OOO *state)
119 ; arg 1 : state
; Flush path of the multi-buffer HMAC-SHA-256 (or SHA-224) manager: when no
; new job is available to fill the lanes, drive the already-queued lanes to
; completion. Empty lanes are pointed at a live lane's data (harmless
; duplicate work) and their lengths forced to 0xFFFF so the scheduler below
; only ever completes real jobs. Returns a completed JOB* in rax, or NULL
; if every lane is empty.
120 MKGLOBAL(FUNC,function,internal)
121 FUNC:
122 
; prologue: save original rsp, 32-byte-align rsp, save callee-saved GPRs
; (rsi/rdi are callee-saved only on Windows, hence the %ifndef LINUX)
123 mov rax, rsp
124 sub rsp, STACK_size
125 and rsp, -32
126 mov [rsp + _gpr_save + 8*0], rbx
127 mov [rsp + _gpr_save + 8*1], rbp
128 mov [rsp + _gpr_save + 8*2], r12
129 mov [rsp + _gpr_save + 8*3], r13
130 mov [rsp + _gpr_save + 8*4], r14
131 %ifndef LINUX
132 mov [rsp + _gpr_save + 8*5], rsi
133 mov [rsp + _gpr_save + 8*6], rdi
134 %endif
135 mov [rsp + _rsp_save], rax ; original SP
136 
137 ; if bit (32+3) is set, then all lanes are empty
138 mov unused_lanes, [state + _unused_lanes_sha256]
139 bt unused_lanes, 32+3
140 jc return_null
141 
142 ; find a lane with a non-null job
; lane 0 is the xor'd default; lanes 1..7 override idx via cmovne with a
; memory-source constant (cmov cannot take an immediate)
143 xor idx, idx
144 
145 %assign I 1
146 %rep 7
147 cmp qword [state + _ldata_sha256 + (I * _HMAC_SHA1_LANE_DATA_size) + _job_in_lane], 0
148 cmovne idx, [rel APPEND(lane_,I)]
149 %assign I (I+1)
150 %endrep
151 
152 copy_lane_data:
153 ; copy idx to empty lanes
; every lane with no job gets the chosen live lane's data pointer and a
; length of 0xFFFF (via len_masks), so the 8-lane transform has valid
; pointers everywhere but the min-length search ignores the empty lanes
154 vmovdqa xmm0, [state + _lens_sha256]
155 mov tmp, [state + _args_data_ptr_sha256 + 8*idx]
156 
157 %assign I 0
158 %rep 8
159 cmp qword [state + _ldata_sha256 + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
160 jne APPEND(skip_,I)
161 mov [state + _args_data_ptr_sha256 + 8*I], tmp
162 vpor xmm0, xmm0, [rel len_masks + 16*I]
163 APPEND(skip_,I):
164 %assign I (I+1)
165 %endrep
166 
167 vmovdqa [state + _lens_sha256 ], xmm0
168 
; find the lane with the smallest remaining length and its index
169 vphminposuw xmm1, xmm0
170 vpextrw DWORD(len2), xmm1, 0 ; min value
171 vpextrw DWORD(idx), xmm1, 1 ; min index (0...7)
172 cmp len2, 0
173 je len_is_0
174 
; subtract the minimum from every lane's length, then run the 8-lane
; SHA-256 transform until that shortest lane finishes
175 vpbroadcastw xmm1, xmm1 ; duplicate words across all lanes
176 vpsubw xmm0, xmm0, xmm1
177 vmovdqa [state + _lens_sha256], xmm0
178 
179 ; "state" and "args" are the same address, arg1
180 ; len is arg2
181 call sha256_oct_avx2
182 ; state and idx are intact
183 
184 len_is_0:
185 ; process completed job "idx"
; decide what's next for this lane: pending extra blocks, the outer hash,
; or full completion. Note the SHA-256 lane data reuses the
; _HMAC_SHA1_LANE_DATA struct layout.
186 imul lane_data, idx, _HMAC_SHA1_LANE_DATA_size
187 lea lane_data, [state + _ldata_sha256 + lane_data]
188 mov DWORD(extra_blocks), [lane_data + _extra_blocks]
189 cmp extra_blocks, 0
190 jne proc_extra_blocks
191 cmp dword [lane_data + _outer_done], 0
192 jne end_loop
193 
194 proc_outer:
; start the outer hash: its message is the byte-swapped inner digest
; written into _outer_block; lane length is set to 1 and the lane's digest
; registers are reloaded with the precomputed (key XOR opad) state
195 mov dword [lane_data + _outer_done], 1
196 mov DWORD(size_offset), [lane_data + _size_offset]
; clear the length field stored in the extra block scratch area
197 mov qword [lane_data + _extra_block + size_offset], 0
198 mov word [state + _lens_sha256 + 2*idx], 1
199 lea tmp, [lane_data + _outer_block]
200 mov job, [lane_data + _job_in_lane]
201 mov [state + _args_data_ptr_sha256 + 8*idx], tmp
202 
; gather the 8 (7 for SHA-224) inner-digest dwords from the transposed
; digest rows and convert them to big-endian
203 vmovd xmm0, [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
204 vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], 1
205 vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], 2
206 vpinsrd xmm0, xmm0, [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], 3
207 vpshufb xmm0, xmm0, [rel byteswap]
208 vmovd xmm1, [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE]
209 vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], 1
210 vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], 2
211 %ifndef SHA224
212 vpinsrd xmm1, xmm1, [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], 3
213 %endif
214 vpshufb xmm1, xmm1, [rel byteswap]
215 
216 vmovdqa [lane_data + _outer_block], xmm0
217 vmovdqa [lane_data + _outer_block + 4*4], xmm1
218 %ifdef SHA224
; SHA-224 outer message is only 28 bytes: byte 28 gets the 0x80 pad marker
219 mov dword [lane_data + _outer_block + 7*4], 0x80
220 %endif
221 
; reload the lane's digest rows with the precomputed opad state
222 mov tmp, [job + _auth_key_xor_opad]
223 vmovdqu xmm0, [tmp]
224 vmovdqu xmm1, [tmp + 4*4]
225 vmovd [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE], xmm0
226 vpextrd [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE], xmm0, 1
227 vpextrd [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE], xmm0, 2
228 vpextrd [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE], xmm0, 3
229 vmovd [state + _args_digest_sha256 + 4*idx + 4*SHA256_DIGEST_ROW_SIZE], xmm1
230 vpextrd [state + _args_digest_sha256 + 4*idx + 5*SHA256_DIGEST_ROW_SIZE], xmm1, 1
231 vpextrd [state + _args_digest_sha256 + 4*idx + 6*SHA256_DIGEST_ROW_SIZE], xmm1, 2
232 vpextrd [state + _args_digest_sha256 + 4*idx + 7*SHA256_DIGEST_ROW_SIZE], xmm1, 3
233 jmp copy_lane_data
234 
235 align 16
236 proc_extra_blocks:
; resume hashing the lane's buffered trailing data: point the lane at
; _extra_block + start_offset and give it the pending block count
237 mov DWORD(start_offset), [lane_data + _start_offset]
238 mov [state + _lens_sha256 + 2*idx], WORD(extra_blocks)
239 lea tmp, [lane_data + _extra_block + start_offset]
240 mov [state + _args_data_ptr_sha256 + 8*idx], tmp
241 mov dword [lane_data + _extra_blocks], 0
242 jmp copy_lane_data
243 
244 return_null:
245 xor job_rax, job_rax
246 jmp return
247 
248 align 16
249 end_loop:
; job fully done: detach it from the lane, mark it complete, and push the
; lane index back onto the free list (4 bits per entry)
250 mov job_rax, [lane_data + _job_in_lane]
251 mov qword [lane_data + _job_in_lane], 0
252 or dword [job_rax + _status], STS_COMPLETED_HMAC
253 mov unused_lanes, [state + _unused_lanes_sha256]
254 shl unused_lanes, 4
255 or unused_lanes, idx
256 mov [state + _unused_lanes_sha256], unused_lanes
257 
258 mov p, [job_rax + _auth_tag_output]
259 
260 ; copy SHA224=14bytes and SHA256=16bytes
; gather 4 digest dwords from the transposed rows, byte-swap to big-endian,
; and store; the SHA-224 variant truncates the last dword to a word
261 mov DWORD(tmp), [state + _args_digest_sha256 + 4*idx + 0*SHA256_DIGEST_ROW_SIZE]
262 mov DWORD(tmp2), [state + _args_digest_sha256 + 4*idx + 1*SHA256_DIGEST_ROW_SIZE]
263 mov DWORD(tmp4), [state + _args_digest_sha256 + 4*idx + 2*SHA256_DIGEST_ROW_SIZE]
264 mov DWORD(tmp5), [state + _args_digest_sha256 + 4*idx + 3*SHA256_DIGEST_ROW_SIZE]
265 
266 bswap DWORD(tmp)
267 bswap DWORD(tmp2)
268 bswap DWORD(tmp4)
269 bswap DWORD(tmp5)
270 mov [p + 0*4], DWORD(tmp)
271 mov [p + 1*4], DWORD(tmp2)
272 mov [p + 2*4], DWORD(tmp4)
273 
274 %ifdef SHA224
275 mov [p + 3*4], WORD(tmp5)
276 %else
277 mov [p + 3*4], DWORD(tmp5)
278 %endif
279 
280 return:
; clear upper YMM state before returning to SSE/C code (AVX->SSE penalty)
281 vzeroupper
282 
; epilogue: restore callee-saved GPRs and the caller's original rsp
283 mov rbx, [rsp + _gpr_save + 8*0]
284 mov rbp, [rsp + _gpr_save + 8*1]
285 mov r12, [rsp + _gpr_save + 8*2]
286 mov r13, [rsp + _gpr_save + 8*3]
287 mov r14, [rsp + _gpr_save + 8*4]
288 %ifndef LINUX
289 mov rsi, [rsp + _gpr_save + 8*5]
290 mov rdi, [rsp + _gpr_save + 8*6]
291 %endif
292 mov rsp, [rsp + _rsp_save] ; original SP
293 
294 ret
295 
; mark the stack non-executable in the ELF object (Linux only)
296 %ifdef LINUX
297 section .note.GNU-stack noalloc noexec nowrite progbits
298 %endif