/*
 * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *	<Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "cast6-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km	0
#define kr	(12*4*4)
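/*
 * Note: these offsets assume the layout of struct cast6_ctx from the
 * generic CAST6 implementation: 12 quad-rounds x 4 x 32-bit masking
 * keys Km at offset 0 (12*4*4 = 192 bytes), immediately followed by
 * the 5-bit rotation keys Kr, packed one per byte.
 */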

/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  8-way AVX cast6
 **********************************************************************/
#define CTX %rdi

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX  %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d


#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	shrq $16,	src;                     \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	interleave_op(il_reg);			 \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;
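/*
 * lookup_32bit performs the four s-box lookups of one CAST round
 * function on a 32-bit value held in a general purpose register: the
 * four index bytes are extracted through the bl/bh subregisters (with a
 * shift by 16 in between to expose the upper half), and the s-box
 * results are combined with op1/op2/op3, which select xor/sub/add
 * depending on the round function type.
 */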

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;

#define F_head(a, x, gi1, gi2, op0) \
	op0	a,	RKM,  x;		 \
	vpslld	RKRF,	x,    RTMP;		 \
	vpsrld	RKRR,	x,    x;		 \
	vpor	RTMP,	x,    x;		 \
	\
	vmovq		x,    gi1;		 \
	vpextrq $1,	x,    gi2;
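/*
 * F_head does the vectorizable part of the round function on four
 * blocks at once: key mixing with Km (op0 is add/xor/sub), then the
 * key-dependent rotation by Kr, implemented as a left shift by RKRF and
 * a right shift by RKRR = 32 - Kr combined with vpor. The rotated words
 * are then moved out to two 64-bit GPRs for the scalar s-box lookups.
 */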

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1);	\
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2);	\
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none);		\
	shlq $32,	RFS2;						\
	orq		RFS1, RFS2;					\
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none);		\
	shlq $32,	RFS1;						\
	orq		RFS1, RFS3;					\
	\
	vmovq		RFS2, x;					\
	vpinsrq $1,	RFS3, x, x;
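/*
 * F_tail runs the s-box lookups for the four 32-bit words that F_head
 * left in gi1/gi2 (two words per GPR), merges the 32-bit results back
 * into 64-bit values, and reassembles them into the xmm register x.
 */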

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0);		\
	F_head(b2, RX, RGI3, RGI4, op0);		\
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3);	\
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3);	\
	\
	vpxor		a1, RX,   a1;			\
	vpxor		a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
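/*
 * F1_2/F2_2/F3_2 are the three CAST-256 round function types, applied
 * to both 4-block groups; with I = ((Km op0 D) <<< Kr):
 *   f1: f = ((S1[Ia] ^ S2[Ib]) - S3[Ic]) + S4[Id]   (op0 = add)
 *   f2: f = ((S1[Ia] - S2[Ib]) + S3[Ic]) ^ S4[Id]   (op0 = xor)
 *   f3: f = ((S1[Ia] + S2[Ib]) ^ S3[Ic]) - S4[Id]   (op0 = sub)
 * The result is xored into a1/a2.
 */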

#define qop(in, out, f) \
	F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);

#define get_round_keys(nn) \
	vbroadcastss	(km+(4*(nn)))(CTX), RKM;	\
	vpand		R1ST,		    RKR,  RKRF; \
	vpsubq		RKRF,		    R32,  RKRR; \
	vpsrldq $1,	RKR,		    RKR;
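/*
 * get_round_keys broadcasts the 32-bit masking key Km[nn] to all four
 * lanes of RKM, masks the lowest byte of the preloaded rotation keys
 * down to 5 bits (R1ST = 0x1f) to get the left-shift count RKRF,
 * derives the matching right-shift count RKRR = 32 - Kr, and byte-shifts
 * RKR so the next round's rotation key lands in the lowest byte.
 */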

#define Q(n) \
	get_round_keys(4*n+0);	\
	qop(RD, RC, 1);		\
	\
	get_round_keys(4*n+1);	\
	qop(RC, RB, 2);		\
	\
	get_round_keys(4*n+2);	\
	qop(RB, RA, 3);		\
	\
	get_round_keys(4*n+3);	\
	qop(RA, RD, 1);

#define QBAR(n) \
	get_round_keys(4*n+3);	\
	qop(RA, RD, 1);		\
	\
	get_round_keys(4*n+2);	\
	qop(RB, RA, 3);		\
	\
	get_round_keys(4*n+1);	\
	qop(RC, RB, 2);		\
	\
	get_round_keys(4*n+0);	\
	qop(RD, RC, 1);
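/*
 * Q(n) is the forward quad-round of CAST-256:
 *   C ^= f1(D); B ^= f2(C); A ^= f3(B); D ^= f1(A)
 * and QBAR(n) is its inverse: the same four operations applied in
 * reverse order, consuming the subkeys of the quad-round in reverse.
 */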

#define shuffle(mask) \
	vpshufb		mask,	RKR, RKR;

#define preload_rkr(n, do_mask, mask) \
	vbroadcastss	.L16_mask,	          RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		(kr+n*16)(CTX),		  RKR, RKR; \
	do_mask(mask);
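/*
 * preload_rkr loads the 16 rotation-key bytes of four quad-rounds in
 * one go. Since Kr < 32, xoring a byte with 16 equals adding 16 mod 32;
 * the extra rotation by 16 bits compensates for the order in which
 * lookup_32bit extracts the s-box index bytes. The optional shuffle
 * mask reorders the Kr bytes for groups that run QBAR, or for the
 * reversed round order of the decryption path.
 */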

#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t2; \
	vpunpckldq		x3, x2, t1; \
	vpunpckhdq		x3, x2, x3; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1; \
	vpunpcklqdq		x3, t2, x2; \
	vpunpckhqdq		x3, t2, x3;
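/*
 * transpose_4x4 is a standard 4x4 transpose of 32-bit elements built
 * from unpack instructions. It converts four loaded blocks (one block
 * per xmm register) into the layout where each register holds the same
 * 32-bit word (A, B, C or D) of all four blocks, so that the quad-round
 * operations work on four blocks in parallel.
 */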

#define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	vpshufb rmask,	x0,	x0; \
	vpshufb rmask,	x1,	x1; \
	vpshufb rmask,	x2,	x2; \
	vpshufb rmask,	x3,	x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpshufb rmask,	x0,	x0; \
	vpshufb rmask,	x1,	x1; \
	vpshufb rmask,	x2,	x2; \
	vpshufb rmask,	x3,	x3;
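/*
 * inpack_blocks byte-swaps each 32-bit word (CAST-256 operates on
 * big-endian words; rmask is .Lbswap_mask) and then transposes into the
 * parallel layout; outunpack_blocks undoes both steps.
 */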

.data

.align 16
.Lxts_gf128mul_and_shl1_mask:
	.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_enc_Q_Q_QBAR_QBAR:
	.byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
	.byte 16, 16, 16, 16
.L32_mask:
	.byte 32, 0, 0, 0
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0
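/*
 * .L16_mask is broadcast and xored into the rotation keys (the "+16"
 * trick in preload_rkr above), .L32_mask supplies the 32 from which Kr
 * is subtracted to form the right-shift count, and .Lfirst_mask (0x1f)
 * isolates the 5-bit rotation amount in the lowest byte of RKR.
 */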

.text

.align 8
__cast6_enc_blk8:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 */
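	/*
	 * CAST-256 encryption: six forward quad-rounds (Q) followed by
	 * six inverse quad-rounds (QBAR). %rbp and %rbx are saved first
	 * because the scalar s-box lookups use them (RID1/RGI4).
	 */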

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(0, dummy, none);
	Q(0);
	Q(1);
	Q(2);
	Q(3);
	preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
	Q(4);
	Q(5);
	QBAR(6);
	QBAR(7);
	preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
	QBAR(8);
	QBAR(9);
	QBAR(10);
	QBAR(11);

	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_enc_blk8)

.align 8
__cast6_dec_blk8:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
	 */
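	/*
	 * Decryption runs the schedule backwards: rounds 11..6 (QBAR
	 * during encryption) invert to Q, rounds 5..0 invert to QBAR,
	 * and the .Lrkr_dec_* shuffle masks reverse the Kr byte order
	 * within each preloaded group to match.
	 */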

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
	Q(11);
	Q(10);
	Q(9);
	Q(8);
	preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
	Q(7);
	Q(6);
	QBAR(5);
	QBAR(4);
	preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
	QBAR(3);
	QBAR(2);
	QBAR(1);
	QBAR(0);

	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;
	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_dec_blk8)

ENTRY(cast6_ecb_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_enc_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_ecb_enc_8way)

ENTRY(cast6_ecb_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_ecb_dec_8way)

ENTRY(cast6_cbc_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
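	/*
	 * CBC decryption parallelizes because each plaintext block only
	 * needs the previous ciphertext block after the block cipher has
	 * been applied; store_cbc_8way (from glue_helper-asm-avx.S) is
	 * expected to perform that xor against the saved src pointer.
	 */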
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r12;

	FRAME_END
	ret;
ENDPROC(cast6_cbc_dec_8way)

ENTRY(cast6_ctr_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (little endian, 128bit)
	 */
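	/*
	 * CTR mode encrypts a counter stream: load_ctr_8way builds eight
	 * consecutive counter blocks from the IV (byte-swapped through
	 * .Lbswap128_mask, since the counter is kept little endian here),
	 * the blocks are encrypted, and store_ctr_8way xors the keystream
	 * with src into dst. Note that this reuses the encryption
	 * primitive; CTR never needs block decryption.
	 */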
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
		      RD2, RX, RKR, RKM);

	call __cast6_enc_blk8;

	store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r12;

	FRAME_END
	ret;
ENDPROC(cast6_ctr_8way)

ENTRY(cast6_xts_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
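	/*
	 * XTS takes a tweak t that is multiplied by α in GF(2¹²⁸) for each
	 * successive block; .Lxts_gf128mul_and_shl1_mask encodes the
	 * shift-left-by-one plus conditional xor with the reduction byte
	 * 0x87 that implements this multiplication inside load_xts_8way.
	 * The same flow serves cast6_xts_dec_8way below, with the
	 * decryption primitive in place of encryption.
	 */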
	FRAME_BEGIN

	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_enc_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_xts_enc_8way)

ENTRY(cast6_xts_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_dec_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_xts_dec_8way)