// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"
enum {
	SKCIPHER_WALK_PHYS = 1 << 0,	/* walk over physical pages (async walk) */
	SKCIPHER_WALK_SLOW = 1 << 1,	/* processing via a bounce buffer */
	SKCIPHER_WALK_COPY = 1 << 2,	/* copying through walk->page for alignment */
	SKCIPHER_WALK_DIFF = 1 << 3,	/* src and dst mapped separately */
	SKCIPHER_WALK_SLEEP = 1 << 4,	/* allocations may sleep (GFP_KERNEL) */
};
struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
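
/*
 * Usage sketch (not part of this file): a typical software cipher drives
 * the walk roughly as below.  toy_xor_blocks() and TOY_BLOCK_SIZE are
 * hypothetical stand-ins, but the loop structure and walk calls are the
 * real API: each step maps at most a page worth of contiguous data, and
 * skcipher_walk_done() takes the number of bytes left *unprocessed*.
 */
#if 0
static int toy_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		/* Process only the whole blocks in this mapped chunk. */
		nbytes -= nbytes % TOY_BLOCK_SIZE;
		toy_xor_blocks(walk.dst.virt.addr, walk.src.virt.addr,
			       nbytes, walk.iv);
		/* Hand back what we did not consume. */
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}
#endif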

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
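
/*
 * Usage sketch (hypothetical, not part of this file): AEAD implementations
 * reuse the same walk for the payload.  skcipher_walk_aead_encrypt() has
 * already skipped req->assoclen bytes of associated data in both
 * scatterlists before the first chunk is mapped.  This simplified loop
 * assumes each step is processed in full.
 */
#if 0
static int toy_aead_encrypt(struct aead_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_aead_encrypt(&walk, req, false);
	while (walk.nbytes) {
		/* ... encrypt walk.nbytes bytes from src to dst ... */
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}
#endif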

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_skcipher_alg(tfm)->encrypt(req);
	crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_skcipher_alg(tfm)->decrypt(req);
	crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
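
/*
 * Usage sketch (hypothetical caller, not part of this file): one-shot
 * encryption through the API above.  "cbc(aes)" is just an example
 * algorithm name; the crypto_wait_req() pattern works for both sync and
 * async implementations.
 */
#if 0
static int toy_encrypt_buffer(const u8 *key, unsigned int keylen)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 iv[16] = {};
	u8 *buf = NULL;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	err = -ENOMEM;
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out;

	/* Data mapped by a scatterlist must not live on the stack. */
	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	sg_init_one(&sg, buf, PAGE_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					   CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, PAGE_SIZE, iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out:
	kfree(buf);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return err;
}
#endif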

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
			      const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
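
/*
 * Usage sketch (hypothetical caller): because a sync tfm's request size is
 * bounded by MAX_SYNC_SKCIPHER_REQSIZE (checked above), the request can be
 * placed on the stack with SYNC_SKCIPHER_REQUEST_ON_STACK.
 */
#if 0
static int toy_sync_encrypt(struct crypto_sync_skcipher *tfm,
			    struct scatterlist *src, struct scatterlist *dst,
			    unsigned int len, u8 *iv)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int err;

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	err = crypto_skcipher_encrypt(req);
	/* Wipe the on-stack request, which may hold sensitive state. */
	skcipher_request_zero(req);
	return err;
}
#endif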

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);
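
/*
 * Sketch of a minimal skcipher_alg as registered above (all toy_* names
 * are hypothetical).  chunksize and walksize may be left zero; in that
 * case skcipher_prepare_alg() defaults chunksize to the block size and
 * walksize to the chunksize.
 */
#if 0
static struct skcipher_alg toy_alg = {
	.base.cra_name		= "toy(aes)",
	.base.cra_driver_name	= "toy-aes-generic",
	.base.cra_priority	= 100,
	.base.cra_blocksize	= 16,
	.base.cra_ctxsize	= sizeof(struct toy_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= 16,
	.max_keysize		= 32,
	.ivsize			= 16,
	.setkey			= toy_setkey,
	.encrypt		= toy_encrypt,
	.decrypt		= toy_decrypt,
};
#endif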

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return ERR_PTR(-EINVAL);

	mask = crypto_requires_off(algt->type, algt->mask,
				   CRYPTO_ALG_NEED_FALLBACK);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
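
/*
 * Usage sketch (hypothetical template, not part of this file): a mode's
 * ->create() hook built on skcipher_alloc_instance_simple(), modelled on
 * how an ECB-style template would use it.
 */
#if 0
static int toy_ecb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	int err;

	inst = skcipher_alloc_instance_simple(tmpl, tb);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	/* ECB takes no IV; override the default copied from the cipher. */
	inst->alg.ivsize = 0;

	inst->alg.encrypt = toy_ecb_encrypt;
	inst->alg.decrypt = toy_ecb_decrypt;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		inst->free(inst);

	return err;
}
#endif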

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");