/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/linkage.h>

.file "cast5-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km	0
#define kr	(16*4)
#define rr	((16*4)+16)

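/*
 * These offsets are assumed to mirror the C-side context structure
 * (a sketch; the authoritative definition lives in the kernel's CAST5
 * header):
 *
 *	struct cast5_ctx {
 *		u32 Km[16];	km: 16 masking subkeys, offset 0
 *		u8 Kr[16];	kr: 16 rotation subkeys, offset 16*4
 *		int rr;		rr: nonzero selects the reduced 12-round
 *				variant (keys of 80 bits or less)
 *	};
 */
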
40 | /* s-boxes */ | |
044ab525 JK |
41 | #define s1 cast_s1 |
42 | #define s2 cast_s2 | |
43 | #define s3 cast_s3 | |
44 | #define s4 cast_s4 | |
4d6d6a2c JG |
45 | |
/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
#define CTX %rdi

#define RL1 %xmm0
#define RR1 %xmm1
#define RL2 %xmm2
#define RR2 %xmm3
#define RL3 %xmm4
#define RR3 %xmm5
#define RL4 %xmm6
#define RR4 %xmm7

#define RX %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12

#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d

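/*
 * The RGI* scratch registers map to %rdx/%rcx/%rax/%rbx because those
 * are the only GPRs with addressable high-byte subregisters (%dh, %ch,
 * %ah, %bh), which lookup_32bit below relies on; %rbp and %rbx are
 * callee-saved and are therefore pushed/popped around their use.
 */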

#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl src ## bh, RID1d; \
	movzbl src ## bl, RID2d; \
	shrq $16, src; \
	movl s1(, RID1, 4), dst ## d; \
	op1 s2(, RID2, 4), dst ## d; \
	movzbl src ## bh, RID1d; \
	movzbl src ## bl, RID2d; \
	interleave_op(il_reg); \
	op2 s3(, RID1, 4), dst ## d; \
	op3 s4(, RID2, 4), dst ## d;

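/*
 * lookup_32bit() combines the four S-box lookups for the 32-bit word
 * in the low half of 'src', roughly (a C sketch):
 *
 *	dst =  s1[(src >> 8) & 0xff] op1 s2[src & 0xff];
 *	src >>= 16;
 *	dst = (dst op2 s3[(src >> 8) & 0xff]) op3 s4[src & 0xff];
 *
 * The apparent (b1, b0, b3, b2) byte-access order is compensated for
 * by the extra 16-bit rotation that {enc,dec}_preload_rkr() fold into
 * the key rotations, so the result matches the byte order of the CAST5
 * specification. When interleave_op is shr_next, a second 16-bit shift
 * moves the upper word of 'src' into place for the next call.
 */
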
#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16, reg;

#define F_head(a, x, gi1, gi2, op0) \
	op0 a, RKM, x; \
	vpslld RKRF, x, RTMP; \
	vpsrld RKRR, x, x; \
	vpor RTMP, x, x; \
	\
	vmovq x, gi1; \
	vpextrq $1, x, gi2;

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
	shlq $32, RFS2; \
	orq RFS1, RFS2; \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
	shlq $32, RFS1; \
	orq RFS1, RFS3; \
	\
	vmovq RFS2, x; \
	vpinsrq $1, RFS3, x, x;

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0); \
	F_head(b2, RX, RGI3, RGI4, op0); \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
	\
	vpxor a1, RX, a1; \
	vpxor a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

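/*
 * F1/F2/F3 implement the three CAST5 round function types of RFC 2144
 * (section 2.2). For one 32-bit half D and round subkeys Km/Kr, each
 * computes (a C sketch):
 *
 *	I = rotl32(Km op0 D, Kr);
 *	F = ((s1[Ia] op1 s2[Ib]) op2 s3[Ic]) op3 s4[Id];
 *
 * where Ia..Id are the bytes of I from most to least significant and
 * (op0, op1, op2, op3) is (+, ^, -, +) for F1, (^, -, +, ^) for F2 and
 * (-, +, ^, -) for F3. The *_2 macros evaluate two register pairs at
 * once: F_head does the key mix and rotation for four blocks per
 * register in the vector unit, while F_tail performs the S-box lookups
 * with scalar loads.
 */
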
#define subround(a1, b1, a2, b2, f) \
	F ## f ## _2(a1, b1, a2, b2);

#define round(l, r, n, f) \
	vbroadcastss (km+(4*n))(CTX), RKM; \
	vpand R1ST, RKR, RKRF; \
	vpsubq RKRF, R32, RKRR; \
	vpsrldq $1, RKR, RKR; \
	subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
	subround(l ## 3, r ## 3, l ## 4, r ## 4, f);

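/*
 * One round over all 16 blocks (four register pairs): Km[n] is
 * broadcast into RKM and one rotation byte is consumed from RKR. R1ST
 * (.Lfirst_mask) isolates the low five bits of RKR's first byte as the
 * left-rotate count RKRF, RKRR = 32 - RKRF supplies the matching right
 * shift, and vpsrldq exposes the next round's rotation byte.
 */
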
#define enc_preload_rkr() \
	vbroadcastss .L16_mask, RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor kr(CTX), RKR, RKR;

#define dec_preload_rkr() \
	vbroadcastss .L16_mask, RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor kr(CTX), RKR, RKR; \
	vpshufb .Lbswap128_mask, RKR, RKR;

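/*
 * XORing every rotation byte with 16 adds 16 (mod 32) to each rotate
 * count, so every round input is pre-rotated by the extra 16 bits that
 * lookup_32bit's (b1, b0, b3, b2) byte order expects. dec_preload_rkr()
 * additionally byte-reverses RKR so the rotation bytes are consumed in
 * reverse round order (15 down to 0).
 */
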
#define transpose_2x4(x0, x1, t0, t1) \
	vpunpckldq x1, x0, t0; \
	vpunpckhdq x1, x0, t1; \
	\
	vpunpcklqdq t1, t0, x0; \
	vpunpckhqdq t1, t0, x1;

#define inpack_blocks(x0, x1, t0, t1, rmask) \
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1; \
	\
	transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(x0, x1, t0, t1, rmask) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1;

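/*
 * Four consecutive 64-bit blocks arrive as two registers of
 * interleaved L/R dwords. inpack_blocks() byteswaps each dword from
 * big endian to native order and transposes, so that x0 ends up
 * holding the four L halves and x1 the four R halves; the rounds then
 * run one Feistel network per dword lane. outunpack_blocks() inverts
 * this and is invoked with (R, L) swapped to undo the final round's
 * half swap.
 */
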
.data

.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lbswap_iv_mask:
	.byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
	.byte 16, 16, 16, 16
.L32_mask:
	.byte 32, 0, 0, 0
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text

.align 16
__cast5_enc_blk16:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RL1: blocks 1 and 2
	 *	RR1: blocks 3 and 4
	 *	RL2: blocks 5 and 6
	 *	RR2: blocks 7 and 8
	 *	RL3: blocks 9 and 10
	 *	RR3: blocks 11 and 12
	 *	RL4: blocks 13 and 14
	 *	RR4: blocks 15 and 16
	 * output:
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	enc_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);

	round(RL, RR, 0, 1);
	round(RR, RL, 1, 2);
	round(RL, RR, 2, 3);
	round(RR, RL, 3, 1);
	round(RL, RR, 4, 2);
	round(RR, RL, 5, 3);
	round(RL, RR, 6, 1);
	round(RR, RL, 7, 2);
	round(RL, RR, 8, 3);
	round(RR, RL, 9, 1);
	round(RL, RR, 10, 2);
	round(RR, RL, 11, 3);

	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz .L__skip_enc;
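
	/* the last four rounds run only with the full 16-round key
	 * schedule (rr == 0, i.e. keys longer than 80 bits) */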
	round(RL, RR, 12, 1);
	round(RR, RL, 13, 2);
	round(RL, RR, 14, 3);
	round(RR, RL, 15, 1);

.L__skip_enc:
	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	ret;
ENDPROC(__cast5_enc_blk16)

.align 16
__cast5_dec_blk16:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 * output:
	 *	RL1: decrypted blocks 1 and 2
	 *	RR1: decrypted blocks 3 and 4
	 *	RL2: decrypted blocks 5 and 6
	 *	RR2: decrypted blocks 7 and 8
	 *	RL3: decrypted blocks 9 and 10
	 *	RR3: decrypted blocks 11 and 12
	 *	RL4: decrypted blocks 13 and 14
	 *	RR4: decrypted blocks 15 and 16
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	dec_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);

	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz .L__skip_dec;

	round(RL, RR, 15, 1);
	round(RR, RL, 14, 3);
	round(RL, RR, 13, 2);
	round(RR, RL, 12, 1);

.L__dec_tail:
	round(RL, RR, 11, 3);
	round(RR, RL, 10, 2);
	round(RL, RR, 9, 1);
	round(RR, RL, 8, 3);
	round(RL, RR, 7, 2);
	round(RR, RL, 6, 1);
	round(RL, RR, 5, 3);
	round(RR, RL, 4, 2);
	round(RL, RR, 3, 1);
	round(RR, RL, 2, 3);
	round(RL, RR, 1, 2);
	round(RR, RL, 0, 1);

	vmovdqa .Lbswap_mask, RKM;
	popq %rbx;
	popq %rbp;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	ret;

.L__skip_dec:
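	/* the 12-round variant skips rounds 16..13; discard their four
	 * rotation bytes from the byte-reversed RKR before joining the
	 * common tail */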
	vpsrldq $4, RKR, RKR;
	jmp .L__dec_tail;
ENDPROC(__cast5_dec_blk16)

ENTRY(cast5_ecb_enc_16way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_enc_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	ret;
ENDPROC(cast5_ecb_enc_16way)

ENTRY(cast5_ecb_dec_16way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_dec_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	ret;
ENDPROC(cast5_ecb_dec_16way)

ENTRY(cast5_cbc_dec_16way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	vmovdqu (0*16)(%rdx), RL1;
	vmovdqu (1*16)(%rdx), RR1;
	vmovdqu (2*16)(%rdx), RL2;
	vmovdqu (3*16)(%rdx), RR2;
	vmovdqu (4*16)(%rdx), RL3;
	vmovdqu (5*16)(%rdx), RR3;
	vmovdqu (6*16)(%rdx), RL4;
	vmovdqu (7*16)(%rdx), RR4;

	call __cast5_dec_blk16;

	/* xor with src */
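	/* CBC: plaintext block n is D(C[n]) ^ C[n-1]. The vpshufd below
	 * puts C[1] in the high qword of RX and zero in the low qword,
	 * so block 1 is xored with zero here; its xor with the IV is
	 * presumably handled by the C glue code. Blocks 2..16 are xored
	 * with ciphertext blocks 1..15 directly from memory. */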
	vmovq (%r12), RX;
	vpshufd $0x4f, RX, RX;
	vpxor RX, RR1, RR1;
	vpxor 0*16+8(%r12), RL1, RL1;
	vpxor 1*16+8(%r12), RR2, RR2;
	vpxor 2*16+8(%r12), RL2, RL2;
	vpxor 3*16+8(%r12), RR3, RR3;
	vpxor 4*16+8(%r12), RL3, RL3;
	vpxor 5*16+8(%r12), RR4, RR4;
	vpxor 6*16+8(%r12), RL4, RL4;

	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r12;

	ret;
ENDPROC(cast5_cbc_dec_16way)

ENTRY(cast5_ctr_16way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (big endian, 64bit)
	 */

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	vpcmpeqd RTMP, RTMP, RTMP;
	vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */

	vpcmpeqd RKR, RKR, RKR;
	vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
	vmovdqa .Lbswap_iv_mask, R1ST;
	vmovdqa .Lbswap128_mask, RKM;

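	/* Counters are kept as little-endian qwords in RX and converted
	 * back to big endian per pair: subtracting RTMP (-1 in the low
	 * lane only) increments just the low counter, while subtracting
	 * RKR (-2 in both lanes) advances both counters to the next
	 * pair. A final RTMP step below produces IV16, which is stored
	 * back as the next call's IV. */
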
	/* load IV and byteswap */
	vmovq (%rcx), RX;
	vpshufb R1ST, RX, RX;

	/* construct IVs */
	vpsubq RTMP, RX, RX;  /* le: IV1, IV0 */
	vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR1; /* be: IV2, IV3 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL2; /* be: IV4, IV5 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR2; /* be: IV6, IV7 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL3; /* be: IV8, IV9 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR3; /* be: IV10, IV11 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL4; /* be: IV12, IV13 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR4; /* be: IV14, IV15 */

	/* store last IV */
	vpsubq RTMP, RX, RX;  /* le: IV16, IV14 */
	vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
	vmovq RX, (%rcx);

	call __cast5_enc_blk16;

	/* dst = src ^ iv */
	vpxor (0*16)(%r12), RR1, RR1;
	vpxor (1*16)(%r12), RL1, RL1;
	vpxor (2*16)(%r12), RR2, RR2;
	vpxor (3*16)(%r12), RL2, RL2;
	vpxor (4*16)(%r12), RR3, RR3;
	vpxor (5*16)(%r12), RL3, RL3;
	vpxor (6*16)(%r12), RR4, RR4;
	vpxor (7*16)(%r12), RL4, RL4;
	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r12;

	ret;
ENDPROC(cast5_ctr_16way)