/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

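/*
 * Each CPU owns its own request queue and work item, so requests can be
 * queued and dispatched without cross-CPU locking; instances record a
 * pointer to the shared cryptd_queue in their template context.
 */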
struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
        struct crypto_shash_spawn spawn;
        struct cryptd_queue *queue;
};

struct aead_instance_ctx {
        struct crypto_aead_spawn aead_spawn;
        struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
        struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_hash_ctx {
        struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
        struct shash_desc desc;
};

struct cryptd_aead_ctx {
        struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
        crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

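/* Allocate and initialize the per-CPU queues and their work items. */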
static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        return 0;
}

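/* Free the per-CPU queues; every queue must already be empty. */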
static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}

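/*
 * Queue a request on the current CPU's queue and kick that CPU's work
 * item so the request gets processed from workqueue context.
 */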
static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int cpu, err;
        struct cryptd_cpu_queue *cpu_queue;

        cpu = get_cpu();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);
        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
        put_cpu();

        return err;
}

/* Called in workqueue context: performs one crypto operation (via
 * req->complete) and reschedules itself if there is more work to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /*
         * Only handle one request at a time to avoid hogging the crypto
         * workqueue. preempt_disable/enable is used to prevent being
         * preempted by cryptd_enqueue_request(). local_bh_disable/enable
         * is used to prevent cryptd_enqueue_request() from running in
         * softirq context while the queue is manipulated here.
         */
        local_bh_disable();
        preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        preempt_enable();
        local_bh_enable();

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        return ictx->queue;
}

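/*
 * Propagate the CRYPTO_ALG_INTERNAL marking from the template attributes
 * so that wrappers around internal-only implementations stay hidden from
 * general algorithm lookups.
 */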
static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
                                         u32 *mask)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return;
        if ((algt->type & CRYPTO_ALG_INTERNAL))
                *type |= CRYPTO_ALG_INTERNAL;
        if ((algt->mask & CRYPTO_ALG_INTERNAL))
                *mask |= CRYPTO_ALG_INTERNAL;
}

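/*
 * Key the child blkcipher, mirroring the request flags into the child
 * and the result flags back into the parent.
 */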
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
                                   const u8 *key, unsigned int keylen)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
        struct crypto_blkcipher *child = ctx->child;
        int err;

        crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
                                          CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(child, key, keylen);
        crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
                                            CRYPTO_TFM_RES_MASK);
        return err;
}

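/*
 * Run the actual (synchronous) blkcipher operation from workqueue
 * context, then restore the caller's completion routine and invoke it.
 */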
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
                                   struct crypto_blkcipher *child,
                                   int err,
                                   int (*crypt)(struct blkcipher_desc *desc,
                                                struct scatterlist *dst,
                                                struct scatterlist *src,
                                                unsigned int len))
{
        struct cryptd_blkcipher_request_ctx *rctx;
        struct blkcipher_desc desc;

        rctx = ablkcipher_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.info = req->info;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypt(&desc, req->dst, req->src, req->nbytes);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
                                    crypto_completion_t compl)
{
        struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_spawn *spawn = &ictx->spawn;
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *cipher;

        cipher = crypto_spawn_blkcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_ablkcipher.reqsize =
                sizeof(struct cryptd_blkcipher_request_ctx);
        return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(ctx->child);
}

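/*
 * Fill in the instance's algorithm fields: the driver name becomes
 * "cryptd(<child driver>)" and the priority is raised by 50 so the
 * async wrapper is preferred over the plain child algorithm.
 */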
static int cryptd_init_instance(struct crypto_instance *inst,
                                struct crypto_alg *alg)
{
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)",
                     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                return -ENAMETOOLONG;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

        return 0;
}

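/*
 * Allocate an instance with "head" bytes before the crypto_instance and
 * "tail" bytes of context after it; the returned pointer is the start of
 * the whole allocation, not the embedded crypto_instance.
 */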
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                   unsigned int tail)
{
        char *p;
        struct crypto_instance *inst;
        int err;

        p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        inst = (void *)(p + head);

        err = cryptd_init_instance(inst, alg);
        if (err)
                goto out_free_inst;

out:
        return p;

out_free_inst:
        kfree(p);
        p = ERR_PTR(err);
        goto out;
}

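/*
 * Build a cryptd instance that wraps a synchronous blkcipher and exposes
 * it as an async ablkcipher whose operations run on the cryptd workqueue.
 */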
static int cryptd_create_blkcipher(struct crypto_template *tmpl,
                                   struct rtattr **tb,
                                   struct cryptd_queue *queue)
{
        struct cryptd_instance_ctx *ctx;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
        u32 mask = CRYPTO_ALG_TYPE_MASK;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        alg = crypto_get_attr_alg(tb, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = crypto_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_spawn(&ctx->spawn, alg, inst,
                                CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
        if (err)
                goto out_free_inst;

        type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
        if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
                type |= CRYPTO_ALG_INTERNAL;
        inst->alg.cra_flags = type;
        inst->alg.cra_type = &crypto_ablkcipher_type;

        inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
        inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
        inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

        inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

        inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

        inst->alg.cra_init = cryptd_blkcipher_init_tfm;
        inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

        inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
        inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
        inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_spawn(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_shash_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *hash;

        hash = crypto_spawn_shash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct cryptd_hash_request_ctx) +
                                 crypto_shash_descsize(hash));
        return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_shash *child = ctx->child;
        int err;

        crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
                                      CRYPTO_TFM_REQ_MASK);
        err = crypto_shash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
                                       CRYPTO_TFM_RES_MASK);
        return err;
}

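/*
 * Save the caller's completion routine, substitute the workqueue handler
 * and push the hash request onto this CPU's cryptd queue.
 */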
static int cryptd_hash_enqueue(struct ahash_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_shash_init(desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_update(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = crypto_shash_final(&rctx->desc, req->result);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_finup(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = shash_ahash_digest(req, desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct shash_desc *desc = cryptd_shash_desc(req);

        /* The descriptor may belong to a freshly allocated request; bind
         * the child tfm before importing the partial state into it. */
        desc->tfm = ctx->child;
        desc->flags = req->base.flags;

        return crypto_shash_import(desc, in);
}

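/*
 * Build a cryptd instance that wraps a synchronous shash and exposes it
 * as an async ahash processed on the cryptd workqueue.
 */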
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct shash_alg *salg;
        struct crypto_alg *alg;
        u32 type = 0;
        u32 mask = 0;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        salg = shash_attr_alg(tb[1], type, mask);
        if (IS_ERR(salg))
                return PTR_ERR(salg);

        alg = &salg->base;
        inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
                                     sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_shash_spawn(&ctx->spawn, salg,
                                      ahash_crypto_instance(inst));
        if (err)
                goto out_free_inst;

        type = CRYPTO_ALG_ASYNC;
        if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
                type |= CRYPTO_ALG_INTERNAL;
        inst->alg.halg.base.cra_flags = type;

        inst->alg.halg.digestsize = salg->digestsize;
        /* Propagate the child's state size so export/import work. */
        inst->alg.halg.statesize = salg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.init = cryptd_hash_init_enqueue;
        inst->alg.update = cryptd_hash_update_enqueue;
        inst->alg.final = cryptd_hash_final_enqueue;
        inst->alg.finup = cryptd_hash_finup_enqueue;
        inst->alg.export = cryptd_hash_export;
        inst->alg.import = cryptd_hash_import;
        inst->alg.setkey = cryptd_hash_setkey;
        inst->alg.digest = cryptd_hash_digest_enqueue;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_shash(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
                                   unsigned int authsize)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setauthsize(child, authsize);
}

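/*
 * Perform the AEAD operation synchronously on the child transform from
 * workqueue context, then call the original completion routine.
 */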
static void cryptd_aead_crypt(struct aead_request *req,
                              struct crypto_aead *child,
                              int err,
                              int (*crypt)(struct aead_request *req))
{
        struct cryptd_aead_request_ctx *rctx;

        rctx = aead_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;
        aead_request_set_tfm(req, child);
        err = crypt(req);
        req->base.complete = rctx->complete;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;
        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
        struct aead_instance *inst = aead_alg_instance(tfm);
        struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
        struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_aead *cipher;

        cipher = crypto_spawn_aead(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_aead_set_reqsize(tfm, sizeof(struct cryptd_aead_request_ctx));
        return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

        crypto_free_aead(ctx->child);
}

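/*
 * Build a cryptd instance that wraps an AEAD implementation and defers
 * its encrypt/decrypt operations to the cryptd workqueue.
 */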
static int cryptd_create_aead(struct crypto_template *tmpl,
                              struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct aead_instance_ctx *ctx;
        struct aead_instance *inst;
        struct aead_alg *alg;
        const char *name;
        u32 type = 0;
        u32 mask = 0;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return PTR_ERR(name);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = aead_instance_ctx(inst);
        ctx->queue = queue;

        crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
        err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
        if (err)
                goto out_free_inst;

        alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
        err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
        if (err)
                goto out_drop_aead;

        inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
                                   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

        inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
        inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

        inst->alg.init = cryptd_aead_init_tfm;
        inst->alg.exit = cryptd_aead_exit_tfm;
        inst->alg.setkey = cryptd_aead_setkey;
        inst->alg.setauthsize = cryptd_aead_setauthsize;
        inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
        inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

        err = aead_register_instance(tmpl, inst);
        if (err) {
out_drop_aead:
                crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
                kfree(inst);
        }
        return err;
}

static struct cryptd_queue queue;

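/*
 * Template entry point: dispatch on the requested algorithm type and
 * create the matching cryptd wrapper instance.
 */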
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_BLKCIPHER:
                return cryptd_create_blkcipher(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_DIGEST:
                return cryptd_create_hash(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_AEAD:
                return cryptd_create_aead(tmpl, tb, &queue);
        }

        return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
        struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
        struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
        struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

        switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_drop_shash(&hctx->spawn);
                kfree(ahash_instance(inst));
                return;
        case CRYPTO_ALG_TYPE_AEAD:
                crypto_drop_aead(&aead_ctx->aead_spawn);
                kfree(aead_instance(inst));
                return;
        default:
                crypto_drop_spawn(&ctx->spawn);
                kfree(inst);
        }
}

static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .create = cryptd_create,
        .free = cryptd_free,
        .module = THIS_MODULE,
};

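/*
 * The helpers below let kernel users allocate a cryptd transform directly
 * by name; the cra_module check rejects lookups that resolved to anything
 * other than a cryptd instance.
 */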
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_tfm *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
        type |= CRYPTO_ALG_TYPE_BLKCIPHER;
        mask &= ~CRYPTO_ALG_TYPE_MASK;
        mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
        tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_tfm(tfm);
                return ERR_PTR(-EINVAL);
        }

        return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
        crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
                                        u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_ahash *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
        crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
                                      u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_aead(tfm);
                return ERR_PTR(-EINVAL);
        }
        return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
        crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

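/* Set up the per-CPU queues, then register the "cryptd" template. */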
static int __init cryptd_init(void)
{
        int err;

        err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
        if (err)
                return err;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                cryptd_fini_queue(&queue);

        return err;
}

static void __exit cryptd_exit(void)
{
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");