/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

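/*
 * Per-CPU queueing: cryptd keeps one crypto_queue and one work_struct
 * per possible CPU.  Requests are queued on the submitting CPU and
 * processed there by cryptd_queue_worker() via kcrypto_wq.
 */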
struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        struct cryptd_cpu_queue *cpu_queue;
};

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
        struct crypto_shash_spawn spawn;
        struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
        struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_hash_ctx {
        struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
        struct shash_desc desc;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        return 0;
}
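
/*
 * Illustrative sketch (not part of this file): a caller pairs
 * cryptd_init_queue() with cryptd_fini_queue(), as cryptd_init() and
 * cryptd_exit() do below ("example_queue" is a hypothetical name):
 *
 *      static struct cryptd_queue example_queue;
 *      int err;
 *
 *      err = cryptd_init_queue(&example_queue, CRYPTD_MAX_CPU_QLEN);
 *      if (err)
 *              return err;
 *      ...
 *      cryptd_fini_queue(&example_queue);
 */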

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}

static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int cpu, err;
        struct cryptd_cpu_queue *cpu_queue;

        cpu = get_cpu();
        cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
        err = crypto_enqueue_request(&cpu_queue->queue, request);
        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
        put_cpu();

        return err;
}
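
/*
 * Note: crypto_enqueue_request() returns -EINPROGRESS once the request
 * is queued, or -EBUSY when the queue is full (a request with
 * CRYPTO_TFM_REQ_MAY_BACKLOG set is still accepted and backlogged).
 * That value is what the submitter of the asynchronous operation sees.
 */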

/* Called in workqueue context: perform one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /* Only handle one request at a time to avoid hogging the crypto
         * workqueue.  preempt_disable/enable is used to prevent
         * being preempted by cryptd_enqueue_request(). */
        preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        preempt_enable();

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(kcrypto_wq, &cpu_queue->work);
}
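
/*
 * When a backlogged request moves into the queue proper, its completion
 * callback is invoked with -EINPROGRESS above, telling the original
 * submitter that the request has been accepted for processing; the real
 * result is delivered later through the req->complete(req, 0) call.
 */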

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        return ictx->queue;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
                                   const u8 *key, unsigned int keylen)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
        struct crypto_blkcipher *child = ctx->child;
        int err;

        crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
                                          CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(child, key, keylen);
        crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
                                            CRYPTO_TFM_RES_MASK);
        return err;
}

static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
                                   struct crypto_blkcipher *child,
                                   int err,
                                   int (*crypt)(struct blkcipher_desc *desc,
                                                struct scatterlist *dst,
                                                struct scatterlist *src,
                                                unsigned int len))
{
        struct cryptd_blkcipher_request_ctx *rctx;
        struct blkcipher_desc desc;

        rctx = ablkcipher_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.info = req->info;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypt(&desc, req->dst, req->src, req->nbytes);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}
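
/*
 * The completion is run with bottom halves disabled because crypto
 * completion callbacks normally run in softirq context; taking
 * local_bh_disable() here gives them the same context even though the
 * worker runs in process context.
 */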

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
                                    crypto_completion_t complete)
{
        struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = complete;

        return cryptd_enqueue_request(queue, &req->base);
}
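
/*
 * The enqueue path saves the caller's completion callback in the
 * request context and points req->base.complete at the cryptd handler
 * (e.g. cryptd_blkcipher_encrypt).  The worker then "completes" the
 * request to perform the actual operation, restores the original
 * callback and invokes it with the result.
 */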

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_spawn *spawn = &ictx->spawn;
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *cipher;

        cipher = crypto_spawn_blkcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_ablkcipher.reqsize =
                sizeof(struct cryptd_blkcipher_request_ctx);
        return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(ctx->child);
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                   unsigned int tail)
{
        char *p;
        struct crypto_instance *inst;
        int err;

        p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        inst = (void *)(p + head);

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_inst;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

out:
        return p;

out_free_inst:
        kfree(p);
        p = ERR_PTR(err);
        goto out;
}
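
/*
 * Memory layout of the allocation returned above: "head" bytes of
 * caller-private data, then the crypto_instance itself, then "tail"
 * bytes of instance context:
 *
 *      p               inst = p + head
 *      |<--- head --->|<-- sizeof(*inst) -->|<--- tail --->|
 */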

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
                                   struct rtattr **tb,
                                   struct cryptd_queue *queue)
{
        struct cryptd_instance_ctx *ctx;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        int err;

        alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
                                  CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = crypto_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_spawn(&ctx->spawn, alg, inst,
                                CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
        if (err)
                goto out_free_inst;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
        inst->alg.cra_type = &crypto_ablkcipher_type;

        inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
        inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
        inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

        inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

        inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

        inst->alg.cra_init = cryptd_blkcipher_init_tfm;
        inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

        inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
        inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
        inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_spawn(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}
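
/*
 * The instance registered here wraps a synchronous blkcipher in an
 * asynchronous ablkcipher whose encrypt/decrypt merely enqueue the
 * request.  Its priority is the child's plus 50 (set in
 * cryptd_alloc_instance()), so once instantiated, "cryptd(xxx)" is
 * preferred over the plain "xxx" in algorithm lookups.
 */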

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_shash_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *hash;

        hash = crypto_spawn_shash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct cryptd_hash_request_ctx) +
                                 crypto_shash_descsize(hash));
        return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_shash *child = ctx->child;
        int err;

        crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
                                      CRYPTO_TFM_REQ_MASK);
        err = crypto_shash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
                                       CRYPTO_TFM_RES_MASK);
        return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
                               crypto_completion_t complete)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = complete;

        return cryptd_enqueue_request(queue, &req->base);
}
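
/*
 * The request size set in cryptd_hash_init_tfm() reserves room for the
 * child's descriptor state directly behind struct
 * cryptd_hash_request_ctx (whose last member is the shash_desc), so
 * rctx->desc can be used without a separate allocation.
 */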

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_shash_init(desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_update(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = crypto_shash_final(&rctx->desc, req->result);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = shash_ahash_digest(req, desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct shash_alg *salg;
        struct crypto_alg *alg;
        int err;

        salg = shash_attr_alg(tb[1], 0, 0);
        if (IS_ERR(salg))
                return PTR_ERR(salg);

        alg = &salg->base;
        inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
                                     sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_shash_spawn(&ctx->spawn, salg,
                                      ahash_crypto_instance(inst));
        if (err)
                goto out_free_inst;

        inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

        inst->alg.halg.digestsize = salg->digestsize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.init = cryptd_hash_init_enqueue;
        inst->alg.update = cryptd_hash_update_enqueue;
        inst->alg.final = cryptd_hash_final_enqueue;
        inst->alg.setkey = cryptd_hash_setkey;
        inst->alg.digest = cryptd_hash_digest_enqueue;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_shash(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}
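
/*
 * Illustrative sketch: once this template is instantiated, users can
 * allocate the asynchronous wrapper by name ("tfm" is a hypothetical
 * variable), assuming a "sha1" shash implementation is available:
 *
 *      struct crypto_ahash *tfm;
 *
 *      tfm = crypto_alloc_ahash("cryptd(sha1)", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 */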

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_BLKCIPHER:
                return cryptd_create_blkcipher(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_DIGEST:
                return cryptd_create_hash(tmpl, tb, &queue);
        }

        return -EINVAL;
}
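
/*
 * cryptd_create() dispatches on the type of the wrapped algorithm, so a
 * single "cryptd" template serves both block ciphers and hashes, e.g.
 * "cryptd(cbc(aes))" or "cryptd(sha256)".
 */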

static void cryptd_free(struct crypto_instance *inst)
{
        struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
        struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

        switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_drop_shash(&hctx->spawn);
                kfree(ahash_instance(inst));
                return;
        }

        crypto_drop_spawn(&ctx->spawn);
        kfree(inst);
}

static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .create = cryptd_create,
        .free = cryptd_free,
        .module = THIS_MODULE,
};

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_tfm *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
        type |= CRYPTO_ALG_TYPE_BLKCIPHER;
        mask &= ~CRYPTO_ALG_TYPE_MASK;
        mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
        tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_tfm(tfm);
                return ERR_PTR(-EINVAL);
        }

        return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
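
/*
 * Illustrative usage sketch ("ctfm" is a hypothetical variable):
 *
 *      struct cryptd_ablkcipher *ctfm;
 *
 *      ctfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
 *      if (IS_ERR(ctfm))
 *              return PTR_ERR(ctfm);
 *      ...
 *      cryptd_free_ablkcipher(ctfm);
 *
 * cryptd_ablkcipher_child() below returns the wrapped synchronous
 * blkcipher, e.g. for use as a fallback where the async path is
 * unnecessary.
 */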

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
        crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

static int __init cryptd_init(void)
{
        int err;

        err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
        if (err)
                return err;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                cryptd_fini_queue(&queue);

        return err;
}

static void __exit cryptd_exit(void)
{
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");