// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm64/crypto/aes-glue.c - wrapper code for ARMv8 AES
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <asm/neon.h>
#include <asm/hwcap.h>
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/xts.h>

#include "aes-ce-setkey.h"

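/*
 * This file is compiled twice: with USE_V8_CRYPTO_EXTENSIONS defined it
 * binds the generic names below to the ARMv8 Crypto Extensions routines
 * (driver suffix "ce", priority 300); otherwise it binds them to the
 * plain NEON implementations (suffix "neon", priority 200), so the CE
 * variant is preferred by the crypto API when both are available.
 */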
#ifdef USE_V8_CRYPTO_EXTENSIONS
#define MODE			"ce"
#define PRIO			300
#define aes_expandkey		ce_aes_expandkey
#define aes_ecb_encrypt		ce_aes_ecb_encrypt
#define aes_ecb_decrypt		ce_aes_ecb_decrypt
#define aes_cbc_encrypt		ce_aes_cbc_encrypt
#define aes_cbc_decrypt		ce_aes_cbc_decrypt
#define aes_cbc_cts_encrypt	ce_aes_cbc_cts_encrypt
#define aes_cbc_cts_decrypt	ce_aes_cbc_cts_decrypt
#define aes_essiv_cbc_encrypt	ce_aes_essiv_cbc_encrypt
#define aes_essiv_cbc_decrypt	ce_aes_essiv_cbc_decrypt
#define aes_ctr_encrypt		ce_aes_ctr_encrypt
#define aes_xts_encrypt		ce_aes_xts_encrypt
#define aes_xts_decrypt		ce_aes_xts_decrypt
#define aes_mac_update		ce_aes_mac_update
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
#else
#define MODE			"neon"
#define PRIO			200
#define aes_ecb_encrypt		neon_aes_ecb_encrypt
#define aes_ecb_decrypt		neon_aes_ecb_decrypt
#define aes_cbc_encrypt		neon_aes_cbc_encrypt
#define aes_cbc_decrypt		neon_aes_cbc_decrypt
#define aes_cbc_cts_encrypt	neon_aes_cbc_cts_encrypt
#define aes_cbc_cts_decrypt	neon_aes_cbc_cts_decrypt
#define aes_essiv_cbc_encrypt	neon_aes_essiv_cbc_encrypt
#define aes_essiv_cbc_decrypt	neon_aes_essiv_cbc_decrypt
#define aes_ctr_encrypt		neon_aes_ctr_encrypt
#define aes_xts_encrypt		neon_aes_xts_encrypt
#define aes_xts_decrypt		neon_aes_xts_decrypt
#define aes_mac_update		neon_aes_mac_update
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
#endif
#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");
#endif
MODULE_ALIAS_CRYPTO("cts(cbc(aes))");
MODULE_ALIAS_CRYPTO("essiv(cbc(aes),sha256)");
MODULE_ALIAS_CRYPTO("cmac(aes)");
MODULE_ALIAS_CRYPTO("xcbc(aes)");
MODULE_ALIAS_CRYPTO("cbcmac(aes)");

MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

/* defined in aes-modes.S */
asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int blocks);
asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int blocks);

asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int blocks, u8 iv[]);
asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int blocks, u8 iv[]);

asmlinkage void aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
				    int rounds, int bytes, u8 const iv[]);
asmlinkage void aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
				    int rounds, int bytes, u8 const iv[]);

asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int blocks, u8 ctr[]);

asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
				int rounds, int blocks, u32 const rk2[],
				u8 iv[], int first);
asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
				int rounds, int blocks, u32 const rk2[],
				u8 iv[], int first);

asmlinkage void aes_essiv_cbc_encrypt(u8 out[], u8 const in[], u32 const rk1[],
				      int rounds, int blocks, u8 iv[],
				      u32 const rk2[]);
asmlinkage void aes_essiv_cbc_decrypt(u8 out[], u8 const in[], u32 const rk1[],
				      int rounds, int blocks, u8 iv[],
				      u32 const rk2[]);

asmlinkage void aes_mac_update(u8 const in[], u32 const rk[], int rounds,
			       int blocks, u8 dg[], int enc_before,
			       int enc_after);

struct cts_cbc_req_ctx {
	struct scatterlist sg_src[2];
	struct scatterlist sg_dst[2];
	struct skcipher_request subreq;
};

struct crypto_aes_xts_ctx {
	struct crypto_aes_ctx key1;
	struct crypto_aes_ctx __aligned(8) key2;
};

struct crypto_aes_essiv_cbc_ctx {
	struct crypto_aes_ctx key1;
	struct crypto_aes_ctx __aligned(8) key2;
	struct crypto_shash *hash;
};

struct mac_tfm_ctx {
	struct crypto_aes_ctx key;
	u8 __aligned(8) consts[];
};

struct mac_desc_ctx {
	unsigned int len;
	u8 dg[AES_BLOCK_SIZE];
};

static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = aes_expandkey(ctx, in_key, key_len);
	if (ret)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

	return ret;
}

static int __maybe_unused xts_set_key(struct crypto_skcipher *tfm,
				      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, in_key, key_len);
	if (ret)
		return ret;

	ret = aes_expandkey(&ctx->key1, in_key, key_len / 2);
	if (!ret)
		ret = aes_expandkey(&ctx->key2, &in_key[key_len / 2],
				    key_len / 2);
	if (!ret)
		return 0;

	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

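/*
 * ESSIV uses a second AES key derived by hashing the user key with
 * SHA-256 and expanding the 256-bit digest as an AES key; the
 * aes_essiv_cbc_* routines receive its round keys (rk2) to derive the
 * per-request IV before the CBC pass.
 */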
static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
					    const u8 *in_key,
					    unsigned int key_len)
{
	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	SHASH_DESC_ON_STACK(desc, ctx->hash);
	u8 digest[SHA256_DIGEST_SIZE];
	int ret;

	ret = aes_expandkey(&ctx->key1, in_key, key_len);
	if (ret)
		goto out;

	desc->tfm = ctx->hash;
	crypto_shash_digest(desc, in_key, key_len, digest);

	ret = aes_expandkey(&ctx->key2, digest, sizeof(digest));
	if (ret)
		goto out;

	return 0;
out:
	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

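/*
 * All modes below derive the round count from the key length in bytes:
 * 6 + key_length / 4 gives 10, 12 or 14 rounds for 128-, 192- and
 * 256-bit keys respectively.
 */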
static int __maybe_unused ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				ctx->key_enc, rounds, blocks);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int __maybe_unused ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				ctx->key_dec, rounds, blocks);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int cbc_encrypt_walk(struct skcipher_request *req,
			    struct skcipher_walk *walk)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err = 0, rounds = 6 + ctx->key_length / 4;
	unsigned int blocks;

	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
				ctx->key_enc, rounds, blocks, walk->iv);
		kernel_neon_end();
		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int __maybe_unused cbc_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;
	return cbc_encrypt_walk(req, &walk);
}

static int cbc_decrypt_walk(struct skcipher_request *req,
			    struct skcipher_walk *walk)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err = 0, rounds = 6 + ctx->key_length / 4;
	unsigned int blocks;

	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
				ctx->key_dec, rounds, blocks, walk->iv);
		kernel_neon_end();
		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int __maybe_unused cbc_decrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;
	return cbc_decrypt_walk(req, &walk);
}

static int cts_cbc_init_tfm(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct cts_cbc_req_ctx));
	return 0;
}

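/*
 * CBC with ciphertext stealing: all but the last two blocks are run
 * through the regular CBC path via a subrequest, and the remaining
 * (possibly partial) two-block tail is handled by the dedicated
 * aes_cbc_cts_* assembly routines.
 */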
static int cts_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cts_cbc_req_ctx *rctx = skcipher_request_ctx(req);
	int err, rounds = 6 + ctx->key_length / 4;
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct skcipher_walk walk;

	skcipher_request_set_tfm(&rctx->subreq, tfm);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = skcipher_walk_virt(&walk, &rctx->subreq, false) ?:
		      cbc_encrypt_walk(&rctx->subreq, &walk);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(rctx->sg_src, req->src,
					     rctx->subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(rctx->sg_dst, req->dst,
					       rctx->subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&rctx->subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &rctx->subreq, false);
	if (err)
		return err;

	kernel_neon_begin();
	aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
			    ctx->key_enc, rounds, walk.nbytes, walk.iv);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}

static int cts_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cts_cbc_req_ctx *rctx = skcipher_request_ctx(req);
	int err, rounds = 6 + ctx->key_length / 4;
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct skcipher_walk walk;

	skcipher_request_set_tfm(&rctx->subreq, tfm);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = skcipher_walk_virt(&walk, &rctx->subreq, false) ?:
		      cbc_decrypt_walk(&rctx->subreq, &walk);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(rctx->sg_src, req->src,
					     rctx->subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(rctx->sg_dst, req->dst,
					       rctx->subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&rctx->subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &rctx->subreq, false);
	if (err)
		return err;

	kernel_neon_begin();
	aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
			    ctx->key_dec, rounds, walk.nbytes, walk.iv);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}

static int __maybe_unused essiv_cbc_init_tfm(struct crypto_skcipher *tfm)
{
	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->hash = crypto_alloc_shash("sha256", 0, 0);

	return PTR_ERR_OR_ZERO(ctx->hash);
}

static void __maybe_unused essiv_cbc_exit_tfm(struct crypto_skcipher *tfm)
{
	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_shash(ctx->hash);
}

static int __maybe_unused essiv_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key1.key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	blocks = walk.nbytes / AES_BLOCK_SIZE;
	if (blocks) {
		kernel_neon_begin();
		aes_essiv_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				      ctx->key1.key_enc, rounds, blocks,
				      req->iv, ctx->key2.key_enc);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err ?: cbc_encrypt_walk(req, &walk);
}

static int __maybe_unused essiv_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key1.key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	blocks = walk.nbytes / AES_BLOCK_SIZE;
	if (blocks) {
		kernel_neon_begin();
		aes_essiv_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				      ctx->key1.key_dec, rounds, blocks,
				      req->iv, ctx->key2.key_enc);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err ?: cbc_decrypt_walk(req, &walk);
}

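/*
 * CTR is a stream cipher, so requests need not be a multiple of the
 * block size: full blocks go through the assembly routine, and a
 * trailing partial block is handled by generating one extra block of
 * keystream (blocks == -1) and XOR-ing it into the remaining bytes.
 */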
static int ctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;
	int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				ctx->key_enc, rounds, blocks, walk.iv);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	if (walk.nbytes) {
		u8 __aligned(8) tail[AES_BLOCK_SIZE];
		unsigned int nbytes = walk.nbytes;
		u8 *tdst = walk.dst.virt.addr;
		u8 *tsrc = walk.src.virt.addr;

		/*
		 * Tell aes_ctr_encrypt() to process a tail block.
		 */
		blocks = -1;

		kernel_neon_begin();
		aes_ctr_encrypt(tail, NULL, ctx->key_enc, rounds,
				blocks, walk.iv);
		kernel_neon_end();
		crypto_xor_cpy(tdst, tsrc, tail, nbytes);
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}

static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
	const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned long flags;

	/*
	 * Temporarily disable interrupts to avoid races where
	 * cachelines are evicted when the CPU is interrupted
	 * to do something else.
	 */
	local_irq_save(flags);
	aes_encrypt(ctx, dst, src);
	local_irq_restore(flags);
}

static int __maybe_unused ctr_encrypt_sync(struct skcipher_request *req)
{
	if (!crypto_simd_usable())
		return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);

	return ctr_encrypt(req);
}

static int __maybe_unused xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = 6 + ctx->key1.key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		kernel_neon_begin();
		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				ctx->key1.key_enc, rounds, blocks,
				ctx->key2.key_enc, walk.iv, first);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}

	return err;
}

static int __maybe_unused xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = 6 + ctx->key1.key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
		kernel_neon_begin();
		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				ctx->key1.key_dec, rounds, blocks,
				ctx->key2.key_enc, walk.iv, first);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}

	return err;
}

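/*
 * Entries whose names carry a "__" prefix are marked CRYPTO_ALG_INTERNAL
 * and are not used directly; aes_init() below wraps each of them in a
 * simd skcipher registered under the name without the prefix, so the
 * NEON code is never entered from a context where SIMD is unusable.
 */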
static struct skcipher_alg aes_algs[] = { {
#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
	.base = {
		.cra_name		= "__ecb(aes)",
		.cra_driver_name	= "__ecb-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.setkey		= skcipher_aes_setkey,
	.encrypt	= ecb_encrypt,
	.decrypt	= ecb_decrypt,
}, {
	.base = {
		.cra_name		= "__cbc(aes)",
		.cra_driver_name	= "__cbc-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.setkey		= skcipher_aes_setkey,
	.encrypt	= cbc_encrypt,
	.decrypt	= cbc_decrypt,
}, {
	.base = {
		.cra_name		= "__ctr(aes)",
		.cra_driver_name	= "__ctr-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.setkey		= skcipher_aes_setkey,
	.encrypt	= ctr_encrypt,
	.decrypt	= ctr_encrypt,
}, {
	.base = {
		.cra_name		= "ctr(aes)",
		.cra_driver_name	= "ctr-aes-" MODE,
		.cra_priority		= PRIO - 1,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.setkey		= skcipher_aes_setkey,
	.encrypt	= ctr_encrypt_sync,
	.decrypt	= ctr_encrypt_sync,
}, {
	.base = {
		.cra_name		= "__xts(aes)",
		.cra_driver_name	= "__xts-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= 2 * AES_MIN_KEY_SIZE,
	.max_keysize	= 2 * AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.setkey		= xts_set_key,
	.encrypt	= xts_encrypt,
	.decrypt	= xts_decrypt,
}, {
#endif
	.base = {
		.cra_name		= "__cts(cbc(aes))",
		.cra_driver_name	= "__cts-cbc-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.walksize	= 2 * AES_BLOCK_SIZE,
	.setkey		= skcipher_aes_setkey,
	.encrypt	= cts_cbc_encrypt,
	.decrypt	= cts_cbc_decrypt,
	.init		= cts_cbc_init_tfm,
}, {
	.base = {
		.cra_name		= "__essiv(cbc(aes),sha256)",
		.cra_driver_name	= "__essiv-cbc-aes-sha256-" MODE,
		.cra_priority		= PRIO + 1,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_essiv_cbc_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.setkey		= essiv_cbc_set_key,
	.encrypt	= essiv_cbc_encrypt,
	.decrypt	= essiv_cbc_decrypt,
	.init		= essiv_cbc_init_tfm,
	.exit		= essiv_cbc_exit_tfm,
} };

static int cbcmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
			 unsigned int key_len)
{
	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
	int err;

	err = aes_expandkey(&ctx->key, in_key, key_len);
	if (err)
		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

	return err;
}

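/*
 * Doubling in GF(2^128) for CMAC subkey derivation: shift the 128-bit
 * value left by one bit and, if the top bit was set, reduce by XOR-ing
 * in 0x87, the low part of the polynomial x^128 + x^7 + x^2 + x + 1.
 */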
static void cmac_gf128_mul_by_x(be128 *y, const be128 *x)
{
	u64 a = be64_to_cpu(x->a);
	u64 b = be64_to_cpu(x->b);

	y->a = cpu_to_be64((a << 1) | (b >> 63));
	y->b = cpu_to_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0));
}

static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
	be128 *consts = (be128 *)ctx->consts;
	int rounds = 6 + key_len / 4;
	int err;

	err = cbcmac_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* encrypt the zero vector */
	kernel_neon_begin();
	aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, ctx->key.key_enc,
			rounds, 1);
	kernel_neon_end();

	cmac_gf128_mul_by_x(consts, consts);
	cmac_gf128_mul_by_x(consts + 1, consts);

	return 0;
}

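/*
 * XCBC derives three subkeys by encrypting the constant blocks 0x01..01,
 * 0x02..02 and 0x03..03 under the user key: K1 becomes the CBC-MAC key,
 * while K2 and K3 are stored in ctx->consts for the final-block step.
 */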
static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	static u8 const ks[3][AES_BLOCK_SIZE] = {
		{ [0 ... AES_BLOCK_SIZE - 1] = 0x1 },
		{ [0 ... AES_BLOCK_SIZE - 1] = 0x2 },
		{ [0 ... AES_BLOCK_SIZE - 1] = 0x3 },
	};

	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
	int rounds = 6 + key_len / 4;
	u8 key[AES_BLOCK_SIZE];
	int err;

	err = cbcmac_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	kernel_neon_begin();
	aes_ecb_encrypt(key, ks[0], ctx->key.key_enc, rounds, 1);
	aes_ecb_encrypt(ctx->consts, ks[1], ctx->key.key_enc, rounds, 2);
	kernel_neon_end();

	return cbcmac_setkey(tfm, key, sizeof(key));
}

static int mac_init(struct shash_desc *desc)
{
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);

	memset(ctx->dg, 0, AES_BLOCK_SIZE);
	ctx->len = 0;

	return 0;
}

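/*
 * Core CBC-MAC primitive shared by cmac, xcbc and cbcmac: when SIMD is
 * usable the blocks are fed to the assembly routine, otherwise a scalar
 * fallback XORs each block into the digest and encrypts it with the
 * library AES implementation.
 */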
static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
			  u8 dg[], int enc_before, int enc_after)
{
	int rounds = 6 + ctx->key_length / 4;

	if (crypto_simd_usable()) {
		kernel_neon_begin();
		aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
			       enc_after);
		kernel_neon_end();
	} else {
		if (enc_before)
			aes_encrypt(ctx, dg, dg);

		while (blocks--) {
			crypto_xor(dg, in, AES_BLOCK_SIZE);
			in += AES_BLOCK_SIZE;

			if (blocks || enc_after)
				aes_encrypt(ctx, dg, dg);
		}
	}
}

static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);

	while (len > 0) {
		unsigned int l;

		if ((ctx->len % AES_BLOCK_SIZE) == 0 &&
		    (ctx->len + len) > AES_BLOCK_SIZE) {

			int blocks = len / AES_BLOCK_SIZE;

			len %= AES_BLOCK_SIZE;

			mac_do_update(&tctx->key, p, blocks, ctx->dg,
				      (ctx->len != 0), (len != 0));

			p += blocks * AES_BLOCK_SIZE;

			if (!len) {
				ctx->len = AES_BLOCK_SIZE;
				break;
			}
			ctx->len = 0;
		}

		l = min(len, AES_BLOCK_SIZE - ctx->len);

		if (l <= AES_BLOCK_SIZE) {
			crypto_xor(ctx->dg + ctx->len, p, l);
			ctx->len += l;
			len -= l;
			p += l;
		}
	}

	return 0;
}

static int cbcmac_final(struct shash_desc *desc, u8 *out)
{
	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);

	mac_do_update(&tctx->key, NULL, 0, ctx->dg, (ctx->len != 0), 0);

	memcpy(out, ctx->dg, AES_BLOCK_SIZE);

	return 0;
}

static int cmac_final(struct shash_desc *desc, u8 *out)
{
	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
	u8 *consts = tctx->consts;

	if (ctx->len != AES_BLOCK_SIZE) {
		ctx->dg[ctx->len] ^= 0x80;
		consts += AES_BLOCK_SIZE;
	}

	mac_do_update(&tctx->key, consts, 1, ctx->dg, 0, 1);

	memcpy(out, ctx->dg, AES_BLOCK_SIZE);

	return 0;
}

static struct shash_alg mac_algs[] = { {
	.base.cra_name		= "cmac(aes)",
	.base.cra_driver_name	= "cmac-aes-" MODE,
	.base.cra_priority	= PRIO,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx) +
				  2 * AES_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,

	.digestsize		= AES_BLOCK_SIZE,
	.init			= mac_init,
	.update			= mac_update,
	.final			= cmac_final,
	.setkey			= cmac_setkey,
	.descsize		= sizeof(struct mac_desc_ctx),
}, {
	.base.cra_name		= "xcbc(aes)",
	.base.cra_driver_name	= "xcbc-aes-" MODE,
	.base.cra_priority	= PRIO,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx) +
				  2 * AES_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,

	.digestsize		= AES_BLOCK_SIZE,
	.init			= mac_init,
	.update			= mac_update,
	.final			= cmac_final,
	.setkey			= xcbc_setkey,
	.descsize		= sizeof(struct mac_desc_ctx),
}, {
	.base.cra_name		= "cbcmac(aes)",
	.base.cra_driver_name	= "cbcmac-aes-" MODE,
	.base.cra_priority	= PRIO,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx),
	.base.cra_module	= THIS_MODULE,

	.digestsize		= AES_BLOCK_SIZE,
	.init			= mac_init,
	.update			= mac_update,
	.final			= cbcmac_final,
	.setkey			= cbcmac_setkey,
	.descsize		= sizeof(struct mac_desc_ctx),
} };

static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];

static void aes_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++)
		if (aes_simd_algs[i])
			simd_skcipher_free(aes_simd_algs[i]);

	crypto_unregister_shashes(mac_algs, ARRAY_SIZE(mac_algs));
	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

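/*
 * Register the skcipher and shash algorithms, then create a simd wrapper
 * for every internal ("__"-prefixed) skcipher; cra_name + 2 and
 * cra_driver_name + 2 skip that prefix to form the public names.
 */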
static int __init aes_init(void)
{
	struct simd_skcipher_alg *simd;
	const char *basename;
	const char *algname;
	const char *drvname;
	int err;
	int i;

	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	if (err)
		return err;

	err = crypto_register_shashes(mac_algs, ARRAY_SIZE(mac_algs));
	if (err)
		goto unregister_ciphers;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
			continue;

		algname = aes_algs[i].base.cra_name + 2;
		drvname = aes_algs[i].base.cra_driver_name + 2;
		basename = aes_algs[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto unregister_simds;

		aes_simd_algs[i] = simd;
	}

	return 0;

unregister_simds:
	aes_exit();
	return err;
unregister_ciphers:
	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	return err;
}

#ifdef USE_V8_CRYPTO_EXTENSIONS
module_cpu_feature_match(AES, aes_init);
#else
module_init(aes_init);
EXPORT_SYMBOL(neon_aes_ecb_encrypt);
EXPORT_SYMBOL(neon_aes_cbc_encrypt);
#endif
module_exit(aes_exit);