/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *	<Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include <asm/frame.h>

.file "cast5-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km	0
#define kr	(16*4)
#define rr	((16*4)+16)

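/*
 * These offsets are assumed to mirror struct cast5_ctx from the generic
 * CAST5 code: u32 Km[16] (16 masking keys, 64 bytes), then u8 Kr[16]
 * (16 5-bit rotation keys), then 'rr', which is non-zero for short
 * (<= 80 bit) keys that use 12 rounds instead of 16 (RFC 2144).
 */
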
/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
#define CTX	%rdi

#define RL1	%xmm0
#define RR1	%xmm1
#define RL2	%xmm2
#define RR2	%xmm3
#define RL3	%xmm4
#define RR3	%xmm5
#define RL4	%xmm6
#define RR4	%xmm7

#define RX	%xmm8

#define RKM	%xmm9
#define RKR	%xmm10
#define RKRF	%xmm11
#define RKRR	%xmm12

#define R32	%xmm13
#define R1ST	%xmm14

#define RTMP	%xmm15

#define RID1	%rbp
#define RID1d	%ebp
#define RID2	%rsi
#define RID2d	%esi

#define RGI1	%rdx
#define RGI1bl	%dl
#define RGI1bh	%dh
#define RGI2	%rcx
#define RGI2bl	%cl
#define RGI2bh	%ch

#define RGI3	%rax
#define RGI3bl	%al
#define RGI3bh	%ah
#define RGI4	%rbx
#define RGI4bl	%bl
#define RGI4bh	%bh

#define RFS1	%r8
#define RFS1d	%r8d
#define RFS2	%r9
#define RFS2d	%r9d
#define RFS3	%r10
#define RFS3d	%r10d


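/*
 * lookup_32bit(): fold four byte-indexed s-box lookups into one 32-bit
 * word.  In rough C, with b0..b3 the bytes of the (pre-rotated) source
 * register from least to most significant:
 *
 *	dst = ((s1[b1] op1 s2[b0]) op2 s3[b3]) op3 s4[b2];
 *
 * op1/op2/op3 cycle through xorl/subl/addl depending on the round
 * function type (see F1_2/F2_2/F3_2 below).  interleave_op(il_reg)
 * lets the caller overlap the next source shift with the table loads.
 */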
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl src ## bh, RID1d; \
	movzbl src ## bl, RID2d; \
	shrq $16, src; \
	movl s1(, RID1, 4), dst ## d; \
	op1 s2(, RID2, 4), dst ## d; \
	movzbl src ## bh, RID1d; \
	movzbl src ## bl, RID2d; \
	interleave_op(il_reg); \
	op2 s3(, RID1, 4), dst ## d; \
	op3 s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16, reg;

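/*
 * F_head: first half of the CAST5 round function, on two 64-bit block
 * halves packed per xmm register.  Combine the broadcast masking key in
 * RKM with the data (op0 is vpaddd/vpxor/vpsubd by round type), rotate
 * left by the current 5-bit rotation key as a shift pair,
 * x <<< r == (x << r) | (x >> (32 - r)), then spill both qwords to
 * general-purpose registers for the s-box lookups.
 */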
#define F_head(a, x, gi1, gi2, op0) \
	op0 a, RKM, x; \
	vpslld RKRF, x, RTMP; \
	vpsrld RKRR, x, x; \
	vpor RTMP, x, x; \
	\
	vmovq x, gi1; \
	vpextrq $1, x, gi2;

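/*
 * F_tail: feed the spilled qwords through lookup_32bit, interleaving
 * the two streams to hide load latency, then re-pack the four 32-bit
 * s-box results into one xmm register.
 */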
#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none); \
	shlq $32, RFS2; \
	orq RFS1, RFS2; \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none); \
	shlq $32, RFS1; \
	orq RFS1, RFS3; \
	\
	vmovq RFS2, x; \
	vpinsrq $1, RFS3, x, x;

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0); \
	F_head(b2, RX, RGI3, RGI4, op0); \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3); \
	\
	vpxor a1, RX, a1; \
	vpxor a2, RTMP, a2;

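/*
 * The three CAST5 round function types as (op0, op1, op2, op3)
 * patterns, per RFC 2144, section 2.2:
 *
 *	type 1: I = ((Km + D) <<< Kr),	f = ((S1 ^ S2) - S3) + S4
 *	type 2: I = ((Km ^ D) <<< Kr),	f = ((S1 - S2) + S3) ^ S4
 *	type 3: I = ((Km - D) <<< Kr),	f = ((S1 + S2) ^ S3) - S4
 */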
#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define subround(a1, b1, a2, b2, f) \
	F ## f ## _2(a1, b1, a2, b2);

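/*
 * One Feistel round over all 16 blocks: broadcast round n's masking
 * key Km, isolate the low five bits of the current rotation key byte
 * as the left-rotate count (RKRF), derive the matching right-shift
 * count 32 - Kr (RKRR), then advance RKR by one byte for the next
 * round.
 */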
#define round(l, r, n, f) \
	vbroadcastss (km+(4*n))(CTX), RKM; \
	vpand R1ST, RKR, RKRF; \
	vpsubq RKRF, R32, RKRR; \
	vpsrldq $1, RKR, RKR; \
	subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
	subround(l ## 3, r ## 3, l ## 4, r ## 4, f);

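/*
 * Since 16 is the top bit of a 5-bit rotate count, Kr ^ 16 ==
 * (Kr + 16) mod 32.  The extra half-word rotation compensates for the
 * bh/bl byte-access order used by lookup_32bit on the swapped data.
 */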
#define enc_preload_rkr() \
	vbroadcastss .L16_mask, RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor kr(CTX), RKR, RKR;

#define dec_preload_rkr() \
	vbroadcastss .L16_mask, RKR; \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor kr(CTX), RKR, RKR; \
	vpshufb .Lbswap128_mask, RKR, RKR;

#define transpose_2x4(x0, x1, t0, t1) \
	vpunpckldq x1, x0, t0; \
	vpunpckhdq x1, x0, t1; \
	\
	vpunpcklqdq t1, t0, x0; \
	vpunpckhqdq t1, t0, x1;

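/*
 * inpack_blocks: byte-swap each 32-bit word to little endian, then
 * transpose so that x0 holds the left halves and x1 the right halves
 * of the four blocks held in the register pair.  outunpack_blocks
 * undoes it.
 */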
#define inpack_blocks(x0, x1, t0, t1, rmask) \
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1; \
	\
	transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(x0, x1, t0, t1, rmask) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb rmask, x0, x0; \
	vpshufb rmask, x1, x1;

.section	.rodata.cst16.bswap_mask, "aM", @progbits, 16
.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.section	.rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.section	.rodata.cst16.bswap_iv_mask, "aM", @progbits, 16
.align 16
.Lbswap_iv_mask:
	.byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0

.section	.rodata.cst4.16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
	.byte 16, 16, 16, 16
.section	.rodata.cst4.32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
	.byte 32, 0, 0, 0
.section	.rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text

.align 16
__cast5_enc_blk16:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RL1: blocks 1 and 2
	 *	RR1: blocks 3 and 4
	 *	RL2: blocks 5 and 6
	 *	RR2: blocks 7 and 8
	 *	RL3: blocks 9 and 10
	 *	RR3: blocks 11 and 12
	 *	RL4: blocks 13 and 14
	 *	RR4: blocks 15 and 16
	 * output:
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	enc_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);

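	/*
	 * 16 rounds; the function type cycles 1, 2, 3 (RFC 2144), and
	 * the swapped l/r arguments between rounds provide the Feistel
	 * swap.
	 */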
	round(RL, RR, 0, 1);
	round(RR, RL, 1, 2);
	round(RL, RR, 2, 3);
	round(RR, RL, 3, 1);
	round(RL, RR, 4, 2);
	round(RR, RL, 5, 3);
	round(RL, RR, 6, 1);
	round(RR, RL, 7, 2);
	round(RL, RR, 8, 3);
	round(RR, RL, 9, 1);
	round(RL, RR, 10, 2);
	round(RR, RL, 11, 3);

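	/* ctx->rr is non-zero for <= 80 bit keys, which use only 12 rounds */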
	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz .L__skip_enc;

	round(RL, RR, 12, 1);
	round(RR, RL, 13, 2);
	round(RL, RR, 14, 3);
	round(RR, RL, 15, 1);

.L__skip_enc:
	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;

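	/* the last Feistel swap is folded into the argument order here */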
	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	ret;
ENDPROC(__cast5_enc_blk16)

.align 16
__cast5_dec_blk16:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 * output:
	 *	RL1: decrypted blocks 1 and 2
	 *	RR1: decrypted blocks 3 and 4
	 *	RL2: decrypted blocks 5 and 6
	 *	RR2: decrypted blocks 7 and 8
	 *	RL3: decrypted blocks 9 and 10
	 *	RR3: decrypted blocks 11 and 12
	 *	RL4: decrypted blocks 13 and 14
	 *	RR4: decrypted blocks 15 and 16
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	dec_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);

	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz .L__skip_dec;

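	/* rounds 15..12 run only for full 16-round (> 80 bit) keys */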
	round(RL, RR, 15, 1);
	round(RR, RL, 14, 3);
	round(RL, RR, 13, 2);
	round(RR, RL, 12, 1);

.L__dec_tail:
	round(RL, RR, 11, 3);
	round(RR, RL, 10, 2);
	round(RL, RR, 9, 1);
	round(RR, RL, 8, 3);
	round(RL, RR, 7, 2);
	round(RR, RL, 6, 1);
	round(RL, RR, 5, 3);
	round(RR, RL, 4, 2);
	round(RL, RR, 3, 1);
	round(RR, RL, 2, 3);
	round(RL, RR, 1, 2);
	round(RR, RL, 0, 1);

	vmovdqa .Lbswap_mask, RKM;
	popq %rbx;
	popq %rbp;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	ret;

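/*
 * 12-round (<= 80 bit) keys: RKR was loaded byte-reversed, so skip the
 * four unused Kr bytes before falling into .L__dec_tail.
 */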
.L__skip_dec:
	vpsrldq $4, RKR, RKR;
	jmp .L__dec_tail;
ENDPROC(__cast5_dec_blk16)

ENTRY(cast5_ecb_enc_16way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_enc_blk16;

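	/* the blk16 helpers return with the halves swapped: RR before RL */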
	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	FRAME_END
	ret;
ENDPROC(cast5_ecb_enc_16way)

ENTRY(cast5_ecb_dec_16way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */

	FRAME_BEGIN
	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_dec_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	FRAME_END
	ret;
ENDPROC(cast5_ecb_dec_16way)

ENTRY(cast5_cbc_dec_16way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	vmovdqu (0*16)(%rdx), RL1;
	vmovdqu (1*16)(%rdx), RR1;
	vmovdqu (2*16)(%rdx), RL2;
	vmovdqu (3*16)(%rdx), RR2;
	vmovdqu (4*16)(%rdx), RL3;
	vmovdqu (5*16)(%rdx), RR3;
	vmovdqu (6*16)(%rdx), RL4;
	vmovdqu (7*16)(%rdx), RR4;

	call __cast5_dec_blk16;

	/* xor with src */
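	/* i.e. with the preceding ciphertext block; the IV xor for the
	 * very first block is assumed to be done by the caller */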
	vmovq (%r12), RX;
	vpshufd $0x4f, RX, RX;
	vpxor RX, RR1, RR1;
	vpxor 0*16+8(%r12), RL1, RL1;
	vpxor 1*16+8(%r12), RR2, RR2;
	vpxor 2*16+8(%r12), RL2, RL2;
	vpxor 3*16+8(%r12), RR3, RR3;
	vpxor 4*16+8(%r12), RL3, RL3;
	vpxor 5*16+8(%r12), RR4, RR4;
	vpxor 6*16+8(%r12), RL4, RL4;

	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r12;

	FRAME_END
	ret;
ENDPROC(cast5_cbc_dec_16way)

ENTRY(cast5_ctr_16way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (big endian, 64bit)
	 */
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

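	/* increment constants, applied via vpsubq: RTMP bumps only the
	 * low-lane counter by one, RKR bumps both counters by two */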
	vpcmpeqd RTMP, RTMP, RTMP;
	vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */

	vpcmpeqd RKR, RKR, RKR;
	vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
	vmovdqa .Lbswap_iv_mask, R1ST;
	vmovdqa .Lbswap128_mask, RKM;

	/* load IV and byteswap */
	vmovq (%rcx), RX;
	vpshufb R1ST, RX, RX;

	/* construct IVs */
	vpsubq RTMP, RX, RX; /* le: IV1, IV0 */
	vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR1; /* be: IV2, IV3 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL2; /* be: IV4, IV5 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR2; /* be: IV6, IV7 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL3; /* be: IV8, IV9 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR3; /* be: IV10, IV11 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL4; /* be: IV12, IV13 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR4; /* be: IV14, IV15 */

	/* store last IV */
	vpsubq RTMP, RX, RX; /* le: IV16, IV14 */
	vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
	vmovq RX, (%rcx);

	call __cast5_enc_blk16;

	/* dst = src ^ encrypted IVs (the CTR keystream) */
	vpxor (0*16)(%r12), RR1, RR1;
	vpxor (1*16)(%r12), RL1, RL1;
	vpxor (2*16)(%r12), RR2, RR2;
	vpxor (3*16)(%r12), RL2, RL2;
	vpxor (4*16)(%r12), RR3, RR3;
	vpxor (5*16)(%r12), RL3, RL3;
	vpxor (6*16)(%r12), RR4, RR4;
	vpxor (7*16)(%r12), RL4, RL4;
	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r12;

	FRAME_END
	ret;
ENDPROC(cast5_ctr_16way)