crypto/xts.c
/* XTS: as defined in IEEE1619/D16
 * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 * (sector sizes which are not a multiple of 16 bytes are,
 * however, currently unsupported)
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

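/*
 * XTS in brief (IEEE 1619): the key is split into two halves, Key1 and
 * Key2.  The initial tweak is T = E(Key2, IV); after each block it is
 * multiplied by x in GF(2^128) (the little-endian "ble" convention of
 * gf128mul_x_ble()).  Each block is then processed as
 * C = E(Key1, P ^ T) ^ T.
 */
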
#define XTS_BUFFER_SIZE 128u

struct priv {
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
};

struct xts_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	char name[CRYPTO_MAX_ALG_NAME];
};

struct rctx {
	/* on-stack tweak buffer, used when the request is small enough */
	le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];

	/* current tweak value T */
	le128 t;

	/* optional heap-allocated tweak buffer for larger requests */
	le128 *ext;

	/* chained scatterlists for walking the remainder of src/dst */
	struct scatterlist srcbuf[2];
	struct scatterlist dstbuf[2];
	struct scatterlist *src;
	struct scatterlist *dst;

	/* bytes still to be processed */
	unsigned int left;

	struct skcipher_request subreq;
};

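/*
 * The key supplied by the user is Key1 || Key2: the first half keys the
 * data cipher, the second half the tweak cipher.  xts_verify_key()
 * rejects odd lengths (and, in FIPS mode, identical halves).
 */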
static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
	int err;

	err = xts_verify_key(parent, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	tweak = ctx->tweak;
	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	/* data cipher, uses Key1 i.e. the first half of *key */
	child = ctx->child;
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);

	return err;
}

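/*
 * Second half of a pre_crypt()/post_crypt() pair: XOR the tweak values
 * saved by pre_crypt() into the data the child ecb cipher just
 * processed and, if more data remains, rebuild rctx->dst so the next
 * chunk resumes where this walk stopped.
 */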
static int post_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	le128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned offset;
	int err;

	subreq = &rctx->subreq;
	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wdst;

		wdst = w.dst.virt.addr;

		do {
			le128_xor(wdst, buf++, wdst);
			wdst++;
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	rctx->left -= subreq->cryptlen;

	if (err || !rctx->left)
		goto out;

	rctx->dst = rctx->dstbuf;

	scatterwalk_done(&w.out, 0, 1);
	sg = w.out.sg;
	offset = w.out.offset;

	if (rctx->dst != sg) {
		rctx->dst[0] = *sg;
		sg_unmark_end(rctx->dst);
		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
	}
	rctx->dst[0].length -= offset - sg->offset;
	rctx->dst[0].offset = offset;

out:
	return err;
}

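/*
 * First half of the pair: for every block of the current chunk, save
 * the tweak value into the buffer, XOR it into the source data and
 * advance T by one multiplication by x in GF(2^128).  The child cipher
 * then runs in plain ecb mode over the result.
 */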
static int pre_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	le128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned cryptlen;
	unsigned offset;
	bool more;
	int err;

	subreq = &rctx->subreq;
	cryptlen = subreq->cryptlen;

	more = rctx->left > cryptlen;
	if (!more)
		cryptlen = rctx->left;

	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
				   cryptlen, NULL);

	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wsrc;
		le128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			*buf++ = rctx->t;
			le128_xor(wdst++, &rctx->t, wsrc++);
			gf128mul_x_ble(&rctx->t, &rctx->t);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
				   cryptlen, NULL);

	if (err || !more)
		goto out;

	rctx->src = rctx->srcbuf;

	scatterwalk_done(&w.in, 0, 1);
	sg = w.in.sg;
	offset = w.in.offset;

	if (rctx->src != sg) {
		rctx->src[0] = *sg;
		sg_unmark_end(rctx->src);
		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
	}
	rctx->src[0].length -= offset - sg->offset;
	rctx->src[0].offset = offset;

out:
	return err;
}

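/*
 * Set up the child request and pick a tweak buffer.  Requests larger
 * than the 128-byte on-stack buffer try to allocate up to a page so
 * fewer pre/post passes are needed; if that allocation fails we just
 * fall back to the small buffer.
 */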
static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;
	gfp_t gfp;

	subreq = &rctx->subreq;
	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, done, req);

	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
							   GFP_ATOMIC;
	rctx->ext = NULL;

	subreq->cryptlen = XTS_BUFFER_SIZE;
	if (req->cryptlen > XTS_BUFFER_SIZE) {
		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

		rctx->ext = kmalloc(n, gfp);
		if (rctx->ext)
			subreq->cryptlen = n;
	}

	rctx->src = req->src;
	rctx->dst = req->dst;
	rctx->left = req->cryptlen;

	/* calculate first value of T */
	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

	return 0;
}

static void exit_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);

	rctx->left = 0;

	if (rctx->ext)
		kzfree(rctx->ext);
}

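/*
 * do_encrypt()/do_decrypt() drive the request one chunk at a time.  If
 * the child cipher goes asynchronous (-EINPROGRESS/-EBUSY), the
 * remaining chunks are driven from the completion callbacks below,
 * which strip the subrequest flags down to CRYPTO_TFM_REQ_MAY_BACKLOG
 * since the callback context must not sleep.
 */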
static int do_encrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_encrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS || err == -EBUSY)
			return err;
	}

	exit_crypt(req);
	return err;
}

static void encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);

	if (err == -EINPROGRESS) {
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_encrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

out:
	skcipher_request_complete(req, err);
}

static int encrypt(struct skcipher_request *req)
{
	return do_encrypt(req, init_crypt(req, encrypt_done));
}

static int do_decrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_decrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS || err == -EBUSY)
			return err;
	}

	exit_crypt(req);
	return err;
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);

	if (err == -EINPROGRESS) {
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_decrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

out:
	skcipher_request_complete(req, err);
}

static int decrypt(struct skcipher_request *req)
{
	return do_decrypt(req, init_crypt(req, decrypt_done));
}

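/*
 * Legacy helper for the old blkcipher interface; architecture-specific
 * XTS implementations still call it, supplying their own tweak and
 * bulk encryption callbacks via struct xts_crypt_req.
 */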
int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct xts_crypt_req *req)
{
	const unsigned int bsize = XTS_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	le128 *src, *dst, *t;
	le128 *t_buf = req->tbuf;
	int err, i;

	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(nbytes / bsize, max_blks);
	src = (le128 *)walk.src.virt.addr;
	dst = (le128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);

	i = 0;
	goto first;

	for (;;) {
		do {
			for (i = 0; i < nblocks; i++) {
				gf128mul_x_ble(&t_buf[i], t);
first:
				t = &t_buf[i];

				/* PP <- T xor P */
				le128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key1,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				le128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		*(le128 *)walk.iv = *t;

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (le128 *)walk.src.virt.addr;
		dst = (le128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(xts_crypt);

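/*
 * Per-tfm setup: instantiate the ecb child from the spawn, plus a bare
 * single-block cipher (looked up by the name saved in the instance
 * context) for computing the initial tweak.
 */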
static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;

	child = crypto_spawn_skcipher(&ictx->spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;

	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
	if (IS_ERR(tweak)) {
		crypto_free_skcipher(ctx->child);
		return PTR_ERR(tweak);
	}

	ctx->tweak = tweak;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
					 sizeof(struct rctx));

	return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

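/*
 * Template instantiation: "xts(cipher)" is built on "ecb(cipher)" plus
 * a single-block cipher for the tweak, so the inner algorithm is
 * grabbed as ecb(<cipher>) and the instance name mangled back to
 * xts(<cipher>) below.
 */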
static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct xts_instance_ctx *ctx;
	struct skcipher_alg *alg;
	const char *cipher_name;
	u32 mask;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));

	mask = crypto_requires_off(algt->type, algt->mask,
				   CRYPTO_ALG_NEED_FALLBACK |
				   CRYPTO_ALG_ASYNC);

	err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(&ctx->spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
		goto err_drop_spawn;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
		if (len < 2 || len >= sizeof(ctx->name))
			goto err_drop_spawn;

		if (ctx->name[len - 1] != ')')
			goto err_drop_spawn;

		ctx->name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_drop_spawn;
		}
	} else
		goto err_drop_spawn;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = XTS_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(&ctx->spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_tmpl = {
	.name = "xts",
	.create = create,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");