/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

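/*
 * Walk state flags (descriptive summary of how the code below uses them):
 *
 * PHYS - the caller asked for page/offset pairs (blkcipher_walk_phys());
 *	  the fast path then skips kmapping entirely.
 * SLOW - the current block straddles a scatterlist entry, so it is bounced
 *	  through an aligned buffer by blkcipher_next_slow().
 * COPY - src/dst fail the alignment mask, so the chunk is copied through a
 *	  temporary page by blkcipher_next_copy().
 * DIFF - source and destination are mapped separately, so completion must
 *	  unmap the destination as well.
 */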
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

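/*
 * Typical caller loop, as a sketch (modeled on in-tree users such as
 * crypto/cbc.c; encrypt_segment() is an illustrative stand-in for the
 * caller's per-chunk routine, which processes walk.src.virt.addr into
 * walk.dst.virt.addr and returns the number of bytes it left unprocessed):
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while (walk.nbytes) {
 *		unsigned int left = encrypt_segment(desc, &walk);
 *		err = blkcipher_walk_done(desc, &walk, left);
 *	}
 *	return err;
 */
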
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}

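/* Slow-path completion: the cipher worked on the aligned bounce buffer,
 * so copy the finished block out to the destination scatterlist.
 */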
static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

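/* Fast-path completion: flush the temporary copy back to the destination
 * (COPY), drop any kmaps, and advance both scatterwalks by the number of
 * bytes processed.
 */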
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

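/* Finish the current chunk of a walk.  @err is the number of bytes the
 * cipher left unprocessed (usually 0), or a negative error code.  Returns
 * 0 once the walk has completed, a negative error, or the result of setting
 * up the next chunk.  A nonzero residue is only valid on the fast path;
 * leaving part of a slow-path block unprocessed is a caller bug and maps
 * to -EINVAL.
 */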
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

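/* Slow path: the next block straddles a scatterlist entry.  Allocate (or
 * reuse) an aligned bounce buffer with room for source and destination
 * copies that do not straddle a page, then gather the source bytes into it.
 */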
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

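/* Alignment fixup: gather the chunk into the pre-allocated temporary page
 * and cipher it in place there; blkcipher_done_fast() copies the result
 * back out to the destination.
 */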
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

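/* Fast path: map source and destination directly.  If they alias the same
 * page and offset, the operation is done in place through a single mapping;
 * otherwise the destination gets its own mapping and BLKCIPHER_WALK_DIFF is
 * set so that completion unmaps both.
 */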
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

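/* Set up the next chunk: reject totals smaller than the cipher block size,
 * clamp the chunk to what both scatterlists can provide contiguously, and
 * dispatch to the slow, copy or fast strategy accordingly.
 */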
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

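/* The IV handed to us by the caller may violate the algorithm's alignment
 * mask; duplicate it into a correctly aligned, page-contiguous slot inside
 * walk->buffer and point walk->iv at the copy.
 */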
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

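/* Public entry points: blkcipher_walk_virt() hands back kmapped virtual
 * addresses for each chunk, while blkcipher_walk_phys() below returns
 * page/offset pairs and never maps on the fast path.
 */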
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

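/* First step common to all walk flavours: refuse to run in hard IRQ context
 * (the walk allocates and may yield), bounce a misaligned IV through
 * blkcipher_copy_iv(), and start both scatterwalks.
 */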
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

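/* The caller's key buffer does not satisfy the algorithm's alignment mask,
 * so copy it into an aligned temporary, set it, then wipe the copy before
 * freeing it.
 */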
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

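/* Context size hook: for synchronous instances (the mask selects the plain
 * blkcipher type) that carry an IV, reserve aligned room for the IV behind
 * the context; crypto_init_blkcipher_ops_sync() later points crt->iv there.
 */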
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

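/* Synchronous setup: wire up the sync entry points and carve crt->iv out of
 * the space that crypto_blkcipher_ctxsize() reserved past the aligned
 * context.
 */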
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");