]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blob - arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
Merge tag 'kbuild-misc-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/masahi...
[mirror_ubuntu-hirsute-kernel.git] / arch / x86 / crypto / sha1-mb / sha1_mb_mgr_flush_avx2.S
1 /*
2 * Flush routine for SHA1 multibuffer
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * James Guilford <james.guilford@intel.com>
22 * Tim Chen <tim.c.chen@linux.intel.com>
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2014 Intel Corporation.
27 *
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
30 * are met:
31 *
32 * * Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer.
34 * * Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in
36 * the documentation and/or other materials provided with the
37 * distribution.
38 * * Neither the name of Intel Corporation nor the names of its
39 * contributors may be used to endorse or promote products derived
40 * from this software without specific prior written permission.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
43 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
44 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
45 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
46 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
47 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
48 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
52 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 */
54 #include <linux/linkage.h>
55 #include <asm/frame.h>
56 #include "sha1_mb_mgr_datastruct.S"
57
58
59 .extern sha1_x8_avx2
60
61 # LINUX register definitions
62 #define arg1 %rdi
63 #define arg2 %rsi
64
65 # Common definitions
66 #define state arg1
67 #define job arg2
68 #define len2 arg2
69
70 # idx must be a register not clobbered by sha1_x8_avx2
71 #define idx %r8
72 #define DWORD_idx %r8d
73
74 #define unused_lanes %rbx
75 #define lane_data %rbx
76 #define tmp2 %rbx
77 #define tmp2_w %ebx
78
79 #define job_rax %rax
80 #define tmp1 %rax
81 #define size_offset %rax
82 #define tmp %rax
83 #define start_offset %rax
84
85 #define tmp3 %arg1
86
87 #define extra_blocks %arg2
88 #define p %arg2
89
90 .macro LABEL prefix n
91 \prefix\n\():
92 .endm
93
94 .macro JNE_SKIP i
95 jne skip_\i
96 .endm
97
98 .altmacro
99 .macro SET_OFFSET _offset
100 offset = \_offset
101 .endm
102 .noaltmacro
103
104 # JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state)
105 # arg 1 : rcx : state
106 ENTRY(sha1_mb_mgr_flush_avx2)
107 FRAME_BEGIN
108 push %rbx
109
110 # If bit (32+3) is set, then all lanes are empty
111 mov _unused_lanes(state), unused_lanes
112 bt $32+3, unused_lanes
113 jc return_null
114
115 # find a lane with a non-null job
116 xor idx, idx
117 offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane)
118 cmpq $0, offset(state)
119 cmovne one(%rip), idx
120 offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane)
121 cmpq $0, offset(state)
122 cmovne two(%rip), idx
123 offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane)
124 cmpq $0, offset(state)
125 cmovne three(%rip), idx
126 offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane)
127 cmpq $0, offset(state)
128 cmovne four(%rip), idx
129 offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane)
130 cmpq $0, offset(state)
131 cmovne five(%rip), idx
132 offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane)
133 cmpq $0, offset(state)
134 cmovne six(%rip), idx
135 offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane)
136 cmpq $0, offset(state)
137 cmovne seven(%rip), idx
138
139 # copy idx to empty lanes
140 copy_lane_data:
141 offset = (_args + _data_ptr)
142 mov offset(state,idx,8), tmp
143
144 I = 0
145 .rep 8
146 offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
147 cmpq $0, offset(state)
148 .altmacro
149 JNE_SKIP %I
150 offset = (_args + _data_ptr + 8*I)
151 mov tmp, offset(state)
152 offset = (_lens + 4*I)
153 movl $0xFFFFFFFF, offset(state)
154 LABEL skip_ %I
155 I = (I+1)
156 .noaltmacro
157 .endr
158
159 # Find min length
160 vmovdqa _lens+0*16(state), %xmm0
161 vmovdqa _lens+1*16(state), %xmm1
162
163 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
164 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
165 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
166 vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
167 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
168
169 vmovd %xmm2, DWORD_idx
170 mov idx, len2
171 and $0xF, idx
172 shr $4, len2
173 jz len_is_0
174
175 vpand clear_low_nibble(%rip), %xmm2, %xmm2
176 vpshufd $0, %xmm2, %xmm2
177
178 vpsubd %xmm2, %xmm0, %xmm0
179 vpsubd %xmm2, %xmm1, %xmm1
180
181 vmovdqa %xmm0, _lens+0*16(state)
182 vmovdqa %xmm1, _lens+1*16(state)
183
184 # "state" and "args" are the same address, arg1
185 # len is arg2
186 call sha1_x8_avx2
187 # state and idx are intact
188
189
190 len_is_0:
191 # process completed job "idx"
192 imul $_LANE_DATA_size, idx, lane_data
193 lea _ldata(state, lane_data), lane_data
194
195 mov _job_in_lane(lane_data), job_rax
196 movq $0, _job_in_lane(lane_data)
197 movl $STS_COMPLETED, _status(job_rax)
198 mov _unused_lanes(state), unused_lanes
199 shl $4, unused_lanes
200 or idx, unused_lanes
201 mov unused_lanes, _unused_lanes(state)
202
203 movl $0xFFFFFFFF, _lens(state, idx, 4)
204
205 vmovd _args_digest(state , idx, 4) , %xmm0
206 vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
207 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
208 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
209 movl _args_digest+4*32(state, idx, 4), tmp2_w
210
211 vmovdqu %xmm0, _result_digest(job_rax)
212 offset = (_result_digest + 1*16)
213 mov tmp2_w, offset(job_rax)
214
215 return:
216 pop %rbx
217 FRAME_END
218 ret
219
220 return_null:
221 xor job_rax, job_rax
222 jmp return
223 ENDPROC(sha1_mb_mgr_flush_avx2)
224
225
226 #################################################################
227
228 .align 16
229 ENTRY(sha1_mb_mgr_get_comp_job_avx2)
230 push %rbx
231
232 ## if bit 32+3 is set, then all lanes are empty
233 mov _unused_lanes(state), unused_lanes
234 bt $(32+3), unused_lanes
235 jc .return_null
236
237 # Find min length
238 vmovdqa _lens(state), %xmm0
239 vmovdqa _lens+1*16(state), %xmm1
240
241 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
242 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
243 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
244 vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
245 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
246
247 vmovd %xmm2, DWORD_idx
248 test $~0xF, idx
249 jnz .return_null
250
251 # process completed job "idx"
252 imul $_LANE_DATA_size, idx, lane_data
253 lea _ldata(state, lane_data), lane_data
254
255 mov _job_in_lane(lane_data), job_rax
256 movq $0, _job_in_lane(lane_data)
257 movl $STS_COMPLETED, _status(job_rax)
258 mov _unused_lanes(state), unused_lanes
259 shl $4, unused_lanes
260 or idx, unused_lanes
261 mov unused_lanes, _unused_lanes(state)
262
263 movl $0xFFFFFFFF, _lens(state, idx, 4)
264
265 vmovd _args_digest(state, idx, 4), %xmm0
266 vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
267 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
268 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
269 movl _args_digest+4*32(state, idx, 4), tmp2_w
270
271 vmovdqu %xmm0, _result_digest(job_rax)
272 movl tmp2_w, _result_digest+1*16(job_rax)
273
274 pop %rbx
275
276 ret
277
278 .return_null:
279 xor job_rax, job_rax
280 pop %rbx
281 ret
282 ENDPROC(sha1_mb_mgr_get_comp_job_avx2)
283
284 .section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
285 .align 16
286 clear_low_nibble:
287 .octa 0x000000000000000000000000FFFFFFF0
288
289 .section .rodata.cst8, "aM", @progbits, 8
290 .align 8
291 one:
292 .quad 1
293 two:
294 .quad 2
295 three:
296 .quad 3
297 four:
298 .quad 4
299 five:
300 .quad 5
301 six:
302 .quad 6
303 seven:
304 .quad 7