;;
;; Copyright (c) 2012-2018, Intel Corporation
;;
;; Redistribution and use in source and binary forms, with or without
;; modification, are permitted provided that the following conditions are met:
;;
;;     * Redistributions of source code must retain the above copyright notice,
;;       this list of conditions and the following disclaimer.
;;     * Redistributions in binary form must reproduce the above copyright
;;       notice, this list of conditions and the following disclaimer in the
;;       documentation and/or other materials provided with the distribution.
;;     * Neither the name of Intel Corporation nor the names of its contributors
;;       may be used to endorse or promote products derived from this software
;;       without specific prior written permission.
;;
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;

%include "os.asm"
%include "job_aes_hmac.asm"
%include "mb_mgr_datastruct.asm"
%include "reg_sizes.asm"

extern  sha1_mult_avx

section .data
default rel

align 16
byteswap:       ;ddq 0x0c0d0e0f08090a0b0405060700010203
        dq 0x0405060700010203, 0x0c0d0e0f08090a0b
x80:            ;ddq 0x00000000000000000000000000000080
        dq 0x0000000000000080, 0x0000000000000000
x00:            ;ddq 0x00000000000000000000000000000000
        dq 0x0000000000000000, 0x0000000000000000

len_masks:
        ;ddq 0x0000000000000000000000000000FFFF
        dq 0x000000000000FFFF, 0x0000000000000000
        ;ddq 0x000000000000000000000000FFFF0000
        dq 0x00000000FFFF0000, 0x0000000000000000
        ;ddq 0x00000000000000000000FFFF00000000
        dq 0x0000FFFF00000000, 0x0000000000000000
        ;ddq 0x0000000000000000FFFF000000000000
        dq 0xFFFF000000000000, 0x0000000000000000
one:    dq 1
two:    dq 2
three:  dq 3

section .text

%if 1
%ifdef LINUX
%define arg1    rdi
%define arg2    rsi
%else
%define arg1    rcx
%define arg2    rdx
%endif

%define state   arg1
%define job     arg2
%define len2    arg2


; idx needs to be in rbx, rbp, r12-r15
%define idx             rbp

%define unused_lanes    rbx
%define lane_data       rbx
%define tmp2            rbx

%define job_rax         rax
%define tmp1            rax
%define size_offset     rax
%define tmp             rax
%define start_offset    rax

%define tmp3            arg1

%define extra_blocks    arg2
%define p               arg2

%define tmp4            r8

%endif

; This routine clobbers rbx, rbp
struc STACK
_gpr_save:      resq    2
_rsp_save:      resq    1
endstruc

%define APPEND(a,b) a %+ b

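;; flush_job_hmac_avx forces progress when no further jobs are being
;; submitted: it picks any lane that still holds a job, replicates that
;; lane's data pointer into the idle lanes so sha1_mult_avx always sees
;; four valid buffers, and hashes until the shortest outstanding job
;; completes. Returns the completed job, or NULL if all lanes were idle.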
; JOB* flush_job_hmac_avx(MB_MGR_HMAC_SHA_1_OOO *state)
; arg 1 : rcx : state
MKGLOBAL(flush_job_hmac_avx,function,internal)
flush_job_hmac_avx:

        mov     rax, rsp
        sub     rsp, STACK_size
        and     rsp, -16

        mov     [rsp + _gpr_save + 8*0], rbx
        mov     [rsp + _gpr_save + 8*1], rbp
        mov     [rsp + _rsp_save], rax  ; original SP

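        ; nothing to flush if every lane is unused: the free-lane byte stack
        ; only has bit 32+7 set when all four lanes are on it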
        mov     unused_lanes, [state + _unused_lanes]
        bt      unused_lanes, 32+7
        jc      return_null

        ; find a lane with a non-null job
        xor     idx, idx
        cmp     qword [state + _ldata + 1 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
        cmovne  idx, [rel one]
        cmp     qword [state + _ldata + 2 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
        cmovne  idx, [rel two]
        cmp     qword [state + _ldata + 3 * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
        cmovne  idx, [rel three]

copy_lane_data:
        ; copy valid lane (idx) to empty lanes
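        ; idle lanes are given the valid lane's data pointer and their 16-bit
        ; length is forced to 0xFFFF (via len_masks) so the vphminposuw
        ; minimum search below never selects an empty lane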
        vmovdqa xmm0, [state + _lens]
        mov     tmp, [state + _args_data_ptr + PTR_SZ*idx]

%assign I 0
%rep 4
        cmp     qword [state + _ldata + I * _HMAC_SHA1_LANE_DATA_size + _job_in_lane], 0
        jne     APPEND(skip_,I)
        mov     [state + _args_data_ptr + PTR_SZ*I], tmp
        vpor    xmm0, xmm0, [rel len_masks + 16*I]
APPEND(skip_,I):
%assign I (I+1)
%endrep

        vmovdqa [state + _lens], xmm0

        vphminposuw xmm1, xmm0
        vpextrw DWORD(len2), xmm1, 0    ; min value
        vpextrw DWORD(idx), xmm1, 1     ; min index (0...3)
        cmp     len2, 0
        je      len_is_0

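        ; lane lengths count SHA1 blocks: broadcast the minimum and subtract
        ; it from every lane so each lane keeps the count still left to hash
        ; after this multi-buffer pass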
        vpshuflw xmm1, xmm1, 0
        vpsubw  xmm0, xmm0, xmm1
        vmovdqa [state + _lens], xmm0

        ; "state" and "args" are the same address, arg1
        ; len is arg2
        call    sha1_mult_avx
        ; state is intact

len_is_0:
        ; process completed job "idx"
        imul    lane_data, idx, _HMAC_SHA1_LANE_DATA_size
        lea     lane_data, [state + _ldata + lane_data]
        mov     DWORD(extra_blocks), [lane_data + _extra_blocks]
        cmp     extra_blocks, 0
        jne     proc_extra_blocks
        cmp     dword [lane_data + _outer_done], 0
        jne     end_loop

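        ; outer HMAC pass: byteswap the inner digest into this lane's
        ; outer_block, reload the precomputed (key XOR opad) digest as the
        ; starting state and hash that single block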
proc_outer:
        mov     dword [lane_data + _outer_done], 1
        mov     DWORD(size_offset), [lane_data + _size_offset]
        mov     qword [lane_data + _extra_block + size_offset], 0
        mov     word [state + _lens + 2*idx], 1
        lea     tmp, [lane_data + _outer_block]
        mov     job, [lane_data + _job_in_lane]
        mov     [state + _args_data_ptr + PTR_SZ*idx], tmp

        ;; idx determines which column
        ;; read off from consecutive rows
        vmovd   xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
        vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], 1
        vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], 2
        vpinsrd xmm0, xmm0, [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], 3
        vpshufb xmm0, xmm0, [rel byteswap]
        mov     DWORD(tmp), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE]
        bswap   DWORD(tmp)
        vmovdqa [lane_data + _outer_block], xmm0
        mov     [lane_data + _outer_block + 4*4], DWORD(tmp)

        mov     tmp, [job + _auth_key_xor_opad]
        vmovdqu xmm0, [tmp]
        mov     DWORD(tmp), [tmp + 4*4]
        vmovd   [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE], xmm0
        vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE], xmm0, 1
        vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE], xmm0, 2
        vpextrd [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 3*SHA1_DIGEST_ROW_SIZE], xmm0, 3
        mov     [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 4*SHA1_DIGEST_ROW_SIZE], DWORD(tmp)
        jmp     copy_lane_data

        align   16
proc_extra_blocks:
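        ; inner hash still has buffered tail data: point the lane at its
        ; pre-built extra_block buffer (starting at start_offset) and queue
        ; the remaining extra_blocks block count for hashing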
        mov     DWORD(start_offset), [lane_data + _start_offset]
        mov     [state + _lens + 2*idx], WORD(extra_blocks)
        lea     tmp, [lane_data + _extra_block + start_offset]
        mov     [state + _args_data_ptr + PTR_SZ*idx], tmp
        mov     dword [lane_data + _extra_blocks], 0
        jmp     copy_lane_data

return_null:
        xor     job_rax, job_rax
        jmp     return

        align   16
end_loop:
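        ; job in lane "idx" is complete: detach it, mark it done, push the
        ; lane back onto the free-lane stack and write out the 12-byte
        ; (truncated) HMAC-SHA1 tag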
        mov     job_rax, [lane_data + _job_in_lane]
        mov     qword [lane_data + _job_in_lane], 0
        or      dword [job_rax + _status], STS_COMPLETED_HMAC
        mov     unused_lanes, [state + _unused_lanes]
        shl     unused_lanes, 8
        or      unused_lanes, idx
        mov     [state + _unused_lanes], unused_lanes

        mov     p, [job_rax + _auth_tag_output]

        ; copy 12 bytes
        mov     DWORD(tmp2), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 0*SHA1_DIGEST_ROW_SIZE]
        mov     DWORD(tmp4), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 1*SHA1_DIGEST_ROW_SIZE]
        mov     DWORD(tmp3), [state + _args_digest + SHA1_DIGEST_WORD_SIZE*idx + 2*SHA1_DIGEST_ROW_SIZE]
        bswap   DWORD(tmp2)
        bswap   DWORD(tmp4)
        bswap   DWORD(tmp3)
        mov     [p + 0*4], DWORD(tmp2)
        mov     [p + 1*4], DWORD(tmp4)
        mov     [p + 2*4], DWORD(tmp3)

return:

        mov     rbx, [rsp + _gpr_save + 8*0]
        mov     rbp, [rsp + _gpr_save + 8*1]
        mov     rsp, [rsp + _rsp_save] ; original SP

        ret

%ifdef LINUX
section .note.GNU-stack noalloc noexec nowrite progbits
%endif