/*
 * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "cast6-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km	0
#define kr	(12*4*4)
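/*
 * Offsets match struct cast6_ctx: 12 quad-rounds x 4 x u32 masking keys
 * (Km) followed by 12 x 4 x u8 rotation keys (Kr), so kr begins at byte
 * 12*4*4 = 192.
 */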

/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  8-way AVX cast6
 **********************************************************************/
#define CTX %rdi

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX  %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d

#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	shrq $16,	src;                     \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	interleave_op(il_reg);			 \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;
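
/*
 * lookup_32bit() performs the four S-box lookups of one CAST f-function on
 * a 32-bit value held in a GPR: bytes are consumed in bh/bl pairs with a
 * shrq $16 in between, and combined into dst with the caller-supplied
 * op1/op2/op3 (xorl/subl/addl in the order the f-function type demands).
 */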

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;

#define F_head(a, x, gi1, gi2, op0) \
	op0	a,	RKM,  x;                 \
	vpslld	RKRF,	x,    RTMP;              \
	vpsrld	RKRR,	x,    x;                 \
	vpor	RTMP,	x,    x;                 \
	\
	vmovq		x,    gi1;               \
	vpextrq $1,	x,    gi2;
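
/*
 * F_head computes I = (Km op0 data) <<< rot for four blocks at once (the
 * rotate is the vpslld/vpsrld/vpor pair using the per-round counts in
 * RKRF/RKRR), then moves the two 64-bit halves of the result into GPRs so
 * the S-box lookups can proceed through the scalar ports.
 */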

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS2;                                      \
	orq		RFS1, RFS2;                                \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS1;                                      \
	orq		RFS1, RFS3;                                \
	\
	vmovq		RFS2, x;                                   \
	vpinsrq $1,	RFS3, x, x;
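
/*
 * F_tail runs lookup_32bit() on each of the four 32-bit words parked in
 * gi1/gi2 and splices the four results back into a single xmm register
 * via shlq/orq and vmovq/vpinsrq.
 */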

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0);              \
	F_head(b2, RX, RGI3, RGI4, op0);              \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3);    \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3);  \
	\
	vpxor		a1, RX,   a1;                 \
	vpxor		a2, RTMP, a2;
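
/*
 * F_2 applies one f-function to both four-block groups back to back; the
 * two independent dependency chains interleave in the pipeline, hiding
 * the latency of the scalar lookups.
 */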

#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
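
/*
 * The three CAST-256 f-function types (RFC 2612, with I = Ia|Ib|Ic|Id):
 *   f1: I = ((Km + D) <<< Kr),  f = ((S1[Ia] ^ S2[Ib]) - S3[Ic]) + S4[Id]
 *   f2: I = ((Km ^ D) <<< Kr),  f = ((S1[Ia] - S2[Ib]) + S3[Ic]) ^ S4[Id]
 *   f3: I = ((Km - D) <<< Kr),  f = ((S1[Ia] + S2[Ib]) ^ S3[Ic]) - S4[Id]
 */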

#define qop(in, out, f) \
	F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);

#define get_round_keys(nn) \
	vbroadcastss	(km+(4*(nn)))(CTX), RKM;        \
	vpand		R1ST,               RKR,  RKRF; \
	vpsubq		RKRF,               R32,  RKRR; \
	vpsrldq $1,	RKR,                RKR;
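
/*
 * get_round_keys() broadcasts masking key Km[nn] into RKM and peels one
 * rotation byte off RKR: RKRF = (kr & 0x1f) is the left-rotate count
 * (R1ST masks the low byte), RKRR = 32 - RKRF is the matching right-shift
 * count, and vpsrldq exposes the next rotation byte for the next call.
 */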

#define Q(n) \
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);        \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2);        \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3);        \
	\
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);

#define QBAR(n) \
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);        \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3);        \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2);        \
	\
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);
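
/*
 * Q() is the forward quad-round of RFC 2612 (C ^= f1(D); B ^= f2(C);
 * A ^= f3(B); D ^= f1(A)) and QBAR() the reverse one; encryption runs
 * Q(0)..Q(5) followed by QBAR(6)..QBAR(11), decryption the exact mirror.
 */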

#define shuffle(mask) \
	vpshufb		mask,            RKR, RKR;

#define preload_rkr(n, do_mask, mask) \
	vbroadcastss	.L16_mask,                RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		(kr+n*16)(CTX),           RKR, RKR; \
	do_mask(mask);
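
/*
 * One preload fetches the 16 rotation bytes feeding four quad-rounds.
 * XORing each 5-bit count with 16 equals adding 16 mod 32; the extra
 * rotate by 16 makes the bh/bl consumption order of lookup_32bit line up
 * with the Ia..Id byte order of the f-function. The shuffle mask reorders
 * the bytes for QBAR rounds and for the reversed decryption schedule.
 */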

#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t2; \
	vpunpckldq		x3, x2, t1; \
	vpunpckhdq		x3, x2, x3; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1; \
	vpunpcklqdq		x3, t2, x2; \
	vpunpckhqdq		x3, t2, x3;
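
/*
 * Standard 4x4 32-bit matrix transpose: two levels of dword/qword
 * unpacking turn four registers of per-block words into four registers
 * that each hold the same word position from four different blocks.
 */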

#define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	vpshufb rmask,		x0,	x0; \
	vpshufb rmask,		x1,	x1; \
	vpshufb rmask,		x2,	x2; \
	vpshufb rmask,		x3,	x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpshufb rmask,		x0,	x0; \
	vpshufb rmask,		x1,	x1; \
	vpshufb rmask,		x2,	x2; \
	vpshufb rmask,		x3,	x3;
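
/*
 * inpack_blocks byteswaps each 32-bit word to the big-endian convention
 * of CAST-256 and transposes, so that RA holds word A of four blocks,
 * RB word B, and so on; outunpack_blocks undoes both steps.
 */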

.section	.rodata.cst16, "aM", @progbits, 16
.align 16
.Lxts_gf128mul_and_shl1_mask:
	.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_enc_Q_Q_QBAR_QBAR:
	.byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

.section	.rodata.cst4.L16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
	.byte 16, 16, 16, 16

.section	.rodata.cst4.L32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
	.byte 32, 0, 0, 0

.section	.rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0
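
/*
 * The constants above sit in mergeable read-only sections ("aM" with a
 * fixed entry size), letting the linker share identical values with
 * other objects instead of duplicating them.
 */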

.text

.align 8
__cast6_enc_blk8:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 */

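	/*
	 * RID1 (%rbp) and RGI4 (%rbx) are callee-saved, so preserve them
	 * across the S-box lookups.
	 */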
	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(0, dummy, none);
	Q(0);
	Q(1);
	Q(2);
	Q(3);
	preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
	Q(4);
	Q(5);
	QBAR(6);
	QBAR(7);
	preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
	QBAR(8);
	QBAR(9);
	QBAR(10);
	QBAR(11);

	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_enc_blk8)

.align 8
__cast6_dec_blk8:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

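	/*
	 * Decryption walks the schedule backwards: every forward Q becomes
	 * a QBAR and vice versa, with the round keys taken in reverse order
	 * via the .Lrkr_dec_* shuffle masks.
	 */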
	preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
	Q(11);
	Q(10);
	Q(9);
	Q(8);
	preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
	Q(7);
	Q(6);
	QBAR(5);
	QBAR(4);
	preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
	QBAR(3);
	QBAR(2);
	QBAR(1);
	QBAR(0);

	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;
	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_dec_blk8)

ENTRY(cast6_ecb_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

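	/*
	 * Stash dst in %r11: %rsi doubles as RID2 and is clobbered by the
	 * S-box lookups inside __cast6_enc_blk8.
	 */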
	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_enc_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_ecb_enc_8way)

ENTRY(cast6_ecb_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_ecb_dec_8way)

ENTRY(cast6_cbc_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r12;

	FRAME_END
	ret;
ENDPROC(cast6_cbc_dec_8way)

ENTRY(cast6_ctr_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (little endian, 128bit)
	 */
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
		      RD2, RX, RKR, RKM);
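
	/*
	 * load_ctr_8way (glue_helper-asm-avx.S) expands the counter into
	 * eight consecutive counter blocks, byteswapping via
	 * .Lbswap128_mask, and bumps the IV in place; the encrypted
	 * counters are XORed with src by store_ctr_8way below.
	 */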

	call __cast6_enc_blk8;

	store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r12;

	FRAME_END
	ret;
ENDPROC(cast6_ctr_8way)

ENTRY(cast6_xts_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);
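
	/*
	 * The eight XTS tweaks are derived from the IV by repeated
	 * multiplication by α in GF(2¹²⁸), i.e. a left shift by one bit
	 * with conditional reduction by 0x87 as encoded in
	 * .Lxts_gf128mul_and_shl1_mask.
	 */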

	call __cast6_enc_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_xts_enc_8way)

ENTRY(cast6_xts_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_dec_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_xts_dec_8way)