/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 1000

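/*
 * cryptd keeps one request queue and one work item per possible CPU.
 * Requests are queued on the CPU that submits them and are processed
 * by the crypto workqueue (kcrypto_wq) on that same CPU.
 */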
struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
        struct crypto_skcipher_spawn spawn;
        struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
        struct crypto_shash_spawn spawn;
        struct cryptd_queue *queue;
};

struct aead_instance_ctx {
        struct crypto_aead_spawn aead_spawn;
        struct cryptd_queue *queue;
};

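/*
 * The per-transform contexts below share a common layout: refcnt must
 * stay the first member because cryptd_enqueue_request() reads it
 * through crypto_tfm_ctx() as a bare atomic_t.  refcnt counts the
 * allocating user plus any requests still in flight.  The request
 * contexts save the caller's completion handler so it can be restored
 * once the worker has run the real operation.
 */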
struct cryptd_blkcipher_ctx {
        atomic_t refcnt;
        struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_skcipher_ctx {
        atomic_t refcnt;
        struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_hash_ctx {
        atomic_t refcnt;
        struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
        struct shash_desc desc;
};

struct cryptd_aead_ctx {
        atomic_t refcnt;
        struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
        crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

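/*
 * Allocate the per-CPU queues and attach the worker to each one;
 * max_cpu_qlen bounds the backlog on each CPU individually.
 */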
static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}

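/*
 * Queue a request on the submitting CPU and kick that CPU's work item.
 * Unless the queue is full, the tfm's refcnt is bumped (when the ctx is
 * refcounted at all, i.e. non-zero) so the transform stays alive while
 * the request is in flight; the matching decrement happens in the
 * completion paths.
 */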
static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int cpu, err;
        struct cryptd_cpu_queue *cpu_queue;
        atomic_t *refcnt;

        cpu = get_cpu();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);

        refcnt = crypto_tfm_ctx(request->tfm);

        if (err == -ENOSPC)
                goto out_put_cpu;

        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

        if (!atomic_read(refcnt))
                goto out_put_cpu;

        atomic_inc(refcnt);

out_put_cpu:
        put_cpu();

        return err;
}

/*
 * Called in workqueue context: perform one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /*
         * Only handle one request at a time to avoid hogging crypto workqueue.
         * preempt_disable/enable is used to prevent being preempted by
         * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
         * cryptd_enqueue_request() being accessed from software interrupts.
         */
        local_bh_disable();
        preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        preempt_enable();
        local_bh_enable();

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        return ictx->queue;
}

static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
                                         u32 *mask)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return;

        *type |= algt->type & CRYPTO_ALG_INTERNAL;
        *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

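/*
 * setkey simply forwards to the child blkcipher, mirroring the
 * CRYPTO_TFM_REQ_* flags into the child beforehand and the
 * CRYPTO_TFM_RES_* flags back into the parent afterwards.
 */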
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
                                   const u8 *key, unsigned int keylen)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
        struct crypto_blkcipher *child = ctx->child;
        int err;

        crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
                                          CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(child, key, keylen);
        crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
                                            CRYPTO_TFM_RES_MASK);
        return err;
}

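/*
 * Runs from the workqueue: perform the actual (synchronous) blkcipher
 * operation, restore the caller's completion handler and invoke it with
 * BHs off.  An incoming -EINPROGRESS is only the backlog notification,
 * so the real work is skipped in that case.  The final reference drop
 * may free the transform once no requests remain.
 */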
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
                                   struct crypto_blkcipher *child,
                                   int err,
                                   int (*crypt)(struct blkcipher_desc *desc,
                                                struct scatterlist *dst,
                                                struct scatterlist *src,
                                                unsigned int len))
{
        struct cryptd_blkcipher_request_ctx *rctx;
        struct cryptd_blkcipher_ctx *ctx;
        struct crypto_ablkcipher *tfm;
        struct blkcipher_desc desc;
        int refcnt;

        rctx = ablkcipher_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.info = req->info;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypt(&desc, req->dst, req->src, req->nbytes);

        req->base.complete = rctx->complete;

out:
        tfm = crypto_ablkcipher_reqtfm(req);
        ctx = crypto_ablkcipher_ctx(tfm);
        refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_ablkcipher(tfm);
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
                                    crypto_completion_t compl)
{
        struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_spawn *spawn = &ictx->spawn;
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *cipher;

        cipher = crypto_spawn_blkcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_ablkcipher.reqsize =
                sizeof(struct cryptd_blkcipher_request_ctx);
        return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(ctx->child);
}

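/*
 * Fill in the common crypto_alg fields for a new cryptd instance: the
 * driver name becomes "cryptd(<child driver>)" and the priority is
 * raised by 50 so the async wrapper is preferred over the underlying
 * synchronous implementation.
 */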
static int cryptd_init_instance(struct crypto_instance *inst,
                                struct crypto_alg *alg)
{
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)",
                     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                return -ENAMETOOLONG;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

        return 0;
}

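/*
 * Allocate an instance as a single block: 'head' bytes for the wrapping
 * type's header in front of the crypto_instance, 'tail' bytes of
 * instance context behind it.  Returns the start of the block, not the
 * crypto_instance itself.
 */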
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                   unsigned int tail)
{
        char *p;
        struct crypto_instance *inst;
        int err;

        p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        inst = (void *)(p + head);

        err = cryptd_init_instance(inst, alg);
        if (err)
                goto out_free_inst;

out:
        return p;

out_free_inst:
        kfree(p);
        p = ERR_PTR(err);
        goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
                                   struct rtattr **tb,
                                   struct cryptd_queue *queue)
{
        struct cryptd_instance_ctx *ctx;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
        u32 mask = CRYPTO_ALG_TYPE_MASK;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        alg = crypto_get_attr_alg(tb, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = crypto_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_spawn(&ctx->spawn, alg, inst,
                                CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
        if (err)
                goto out_free_inst;

        type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
        if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
                type |= CRYPTO_ALG_INTERNAL;
        inst->alg.cra_flags = type;
        inst->alg.cra_type = &crypto_ablkcipher_type;

        inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
        inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
        inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

        inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

        inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

        inst->alg.cra_init = cryptd_blkcipher_init_tfm;
        inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

        inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
        inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
        inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_spawn(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
                                  const u8 *key, unsigned int keylen)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
        struct crypto_skcipher *child = ctx->child;
        int err;

        crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
                                         CRYPTO_TFM_REQ_MASK);
        err = crypto_skcipher_setkey(child, key, keylen);
        crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
                                          CRYPTO_TFM_RES_MASK);
        return err;
}

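/*
 * Call the saved completion handler with BHs disabled and drop the
 * queueing reference once the request has truly finished, i.e. err is
 * not the -EINPROGRESS backlog notification.
 */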
static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        int refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(tfm);
}

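/*
 * Worker-side encrypt/decrypt: build an on-stack subrequest aimed at
 * the synchronous child, run it, then complete the original request.
 * The subrequest is zeroed afterwards so no request state lingers on
 * the stack.
 */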
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *child = ctx->child;
        SKCIPHER_REQUEST_ON_STACK(subreq, child);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *child = ctx->child;
        SKCIPHER_REQUEST_ON_STACK(subreq, child);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_decrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
                                   crypto_completion_t compl)
{
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
        struct crypto_skcipher_spawn *spawn = &ictx->spawn;
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *cipher;

        cipher = crypto_spawn_skcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_skcipher_set_reqsize(
                tfm, sizeof(struct cryptd_skcipher_request_ctx));
        return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
        struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

        crypto_drop_skcipher(&ctx->spawn);
        kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
                                  struct rtattr **tb,
                                  struct cryptd_queue *queue)
{
        struct skcipherd_instance_ctx *ctx;
        struct skcipher_instance *inst;
        struct skcipher_alg *alg;
        const char *name;
        u32 type;
        u32 mask;
        int err;

        type = 0;
        mask = CRYPTO_ALG_ASYNC;

        cryptd_check_internal(tb, &type, &mask);

        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return PTR_ERR(name);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = skcipher_instance_ctx(inst);
        ctx->queue = queue;

        crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
        err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
        if (err)
                goto out_free_inst;

        alg = crypto_spawn_skcipher_alg(&ctx->spawn);
        err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
        if (err)
                goto out_drop_skcipher;

        inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
                                   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

        inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
        inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
        inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
        inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

        inst->alg.init = cryptd_skcipher_init_tfm;
        inst->alg.exit = cryptd_skcipher_exit_tfm;

        inst->alg.setkey = cryptd_skcipher_setkey;
        inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
        inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

        inst->free = cryptd_skcipher_free;

        err = skcipher_register_instance(tmpl, inst);
        if (err) {
out_drop_skcipher:
                crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }
        return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_shash_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *hash;

        hash = crypto_spawn_shash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct cryptd_hash_request_ctx) +
                                 crypto_shash_descsize(hash));
        return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_shash *child = ctx->child;
        int err;

        crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
                               CRYPTO_TFM_REQ_MASK);
        err = crypto_shash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
                               CRYPTO_TFM_RES_MASK);
        return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        int refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_shash_init(desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_update(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = crypto_shash_final(&rctx->desc, req->result);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_finup(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = shash_ahash_digest(req, desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_shash_export(&rctx->desc, out);
}

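/*
 * Import runs synchronously, without a queued worker to set up the
 * descriptor, so the shash_desc embedded in the request context is
 * (re)initialised here before the state blob is imported into it.
 */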
static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct shash_desc *desc = cryptd_shash_desc(req);

        desc->tfm = ctx->child;
        desc->flags = req->base.flags;

        return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct shash_alg *salg;
        struct crypto_alg *alg;
        u32 type = 0;
        u32 mask = 0;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        salg = shash_attr_alg(tb[1], type, mask);
        if (IS_ERR(salg))
                return PTR_ERR(salg);

        alg = &salg->base;
        inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
                                     sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_shash_spawn(&ctx->spawn, salg,
                                      ahash_crypto_instance(inst));
        if (err)
                goto out_free_inst;

        inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
                (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
                                   CRYPTO_ALG_OPTIONAL_KEY));

        inst->alg.halg.digestsize = salg->digestsize;
        inst->alg.halg.statesize = salg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.init = cryptd_hash_init_enqueue;
        inst->alg.update = cryptd_hash_update_enqueue;
        inst->alg.final = cryptd_hash_final_enqueue;
        inst->alg.finup = cryptd_hash_finup_enqueue;
        inst->alg.export = cryptd_hash_export;
        inst->alg.import = cryptd_hash_import;
        if (crypto_shash_alg_has_setkey(salg))
                inst->alg.setkey = cryptd_hash_setkey;
        inst->alg.digest = cryptd_hash_digest_enqueue;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_shash(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
                                   unsigned int authsize)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setauthsize(child, authsize);
}

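/*
 * Worker-side AEAD processing: redirect the request at the child
 * transform and run the real encrypt/decrypt synchronously, then
 * complete with BHs off.  The completion pointer is saved up front
 * since the child shares the request context memory (reqsize is the
 * max of both) and crypt() may clobber rctx.
 */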
static void cryptd_aead_crypt(struct aead_request *req,
                              struct crypto_aead *child,
                              int err,
                              int (*crypt)(struct aead_request *req))
{
        struct cryptd_aead_request_ctx *rctx;
        struct cryptd_aead_ctx *ctx;
        crypto_completion_t compl;
        struct crypto_aead *tfm;
        int refcnt;

        rctx = aead_request_ctx(req);
        compl = rctx->complete;

        tfm = crypto_aead_reqtfm(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;
        aead_request_set_tfm(req, child);
        err = crypt(req);

out:
        ctx = crypto_aead_ctx(tfm);
        refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        compl(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;
        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
        struct aead_instance *inst = aead_alg_instance(tfm);
        struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
        struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_aead *cipher;

        cipher = crypto_spawn_aead(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_aead_set_reqsize(
                tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
                         crypto_aead_reqsize(cipher)));
        return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

        crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
                              struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct aead_instance_ctx *ctx;
        struct aead_instance *inst;
        struct aead_alg *alg;
        const char *name;
        u32 type = 0;
        u32 mask = CRYPTO_ALG_ASYNC;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return PTR_ERR(name);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = aead_instance_ctx(inst);
        ctx->queue = queue;

        crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
        err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
        if (err)
                goto out_free_inst;

        alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
        err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
        if (err)
                goto out_drop_aead;

        inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
                                   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

        inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
        inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

        inst->alg.init = cryptd_aead_init_tfm;
        inst->alg.exit = cryptd_aead_exit_tfm;
        inst->alg.setkey = cryptd_aead_setkey;
        inst->alg.setauthsize = cryptd_aead_setauthsize;
        inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
        inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

        err = aead_register_instance(tmpl, inst);
        if (err) {
out_drop_aead:
                crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
                kfree(inst);
        }
        return err;
}

static struct cryptd_queue queue;

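/*
 * Template entry point: dispatch on the requested algorithm type.
 * Requests that ask for the old blkcipher type get the legacy
 * ablkcipher wrapper; other cipher requests in the same masked class
 * go through the skcipher path.
 */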
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_BLKCIPHER:
                if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
                    CRYPTO_ALG_TYPE_BLKCIPHER)
                        return cryptd_create_blkcipher(tmpl, tb, &queue);

                return cryptd_create_skcipher(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_DIGEST:
                return cryptd_create_hash(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_AEAD:
                return cryptd_create_aead(tmpl, tb, &queue);
        }

        return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
        struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
        struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
        struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

        switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_drop_shash(&hctx->spawn);
                kfree(ahash_instance(inst));
                return;
        case CRYPTO_ALG_TYPE_AEAD:
                crypto_drop_aead(&aead_ctx->aead_spawn);
                kfree(aead_instance(inst));
                return;
        default:
                crypto_drop_spawn(&ctx->spawn);
                kfree(inst);
        }
}

static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .create = cryptd_create,
        .free = cryptd_free,
        .module = THIS_MODULE,
};

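/*
 * Exported helpers for users that instantiate cryptd directly (e.g. the
 * AES-NI glue code).  Each cryptd_alloc_* helper builds the "cryptd(...)"
 * name, allocates the transform and initialises refcnt to 1 for the
 * caller; the module check rejects lookups that resolved to something
 * other than this template.  cryptd_*_queued() reports whether requests
 * are still pending, and cryptd_free_*() drops the caller's reference,
 * freeing the transform once the count reaches zero.
 */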
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_blkcipher_ctx *ctx;
        struct crypto_tfm *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        type = crypto_skcipher_type(type);
        mask &= ~CRYPTO_ALG_TYPE_MASK;
        mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
        tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_tfm(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_tfm_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_skcipher_ctx *ctx;
        struct crypto_skcipher *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);

        tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);

        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_skcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_skcipher_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
                                        u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_hash_ctx *ctx;
        struct crypto_ahash *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_ahash_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
                                      u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_aead_ctx *ctx;
        struct crypto_aead *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_aead(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_aead_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx;

        ctx = crypto_aead_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
        int err;

        err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
        if (err)
                return err;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                cryptd_fini_queue(&queue);

        return err;
}

static void __exit cryptd_exit(void)
{
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");