/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

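/*
 * MCRYPTD_MAX_CPU_QLEN bounds each per-CPU request queue; MCRYPTD_BATCH
 * bounds how many requests one worker run handles before rescheduling
 * itself (the batch limit is waived while the daemon is the only task
 * running, see mcryptd_queue_worker()).
 */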
#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail);

struct mcryptd_flush_list {
	struct list_head list;
	struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

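/*
 * Arm a delayed flusher for this CPU's algorithm state if one is not
 * already engaged, so that partially filled multi-buffer lanes are
 * completed even when no further requests arrive.
 */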
void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
	struct mcryptd_flush_list *flist;

	if (!cstate->flusher_engaged) {
		/* put the flusher on the flush list */
		flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
		mutex_lock(&flist->lock);
		list_add_tail(&cstate->flush_list, &flist->list);
		cstate->flusher_engaged = true;
		cstate->next_flush = jiffies + delay;
		queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
				      &cstate->flush, delay);
		mutex_unlock(&flist->lock);
	}
}
EXPORT_SYMBOL(mcryptd_arm_flusher);

static int mcryptd_init_queue(struct mcryptd_queue *queue,
			      unsigned int max_cpu_qlen)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
	pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		/* print each CPU's own queue pointer, not the percpu base */
		pr_debug("cpu_queue #%d %p\n", cpu, cpu_queue);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
	}
	return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

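/*
 * Queue a request on the current CPU's queue and kick that CPU's worker.
 * The submitting CPU is recorded in the request tag so completion can be
 * steered back to the same CPU.
 */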
static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				   struct crypto_async_request *request,
				   struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	rctx->tag.cpu = cpu;

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/*
 * Try to opportunistically flush the partially completed jobs if
 * the crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
	struct mcryptd_flush_list *flist;
	struct mcryptd_alg_cstate *cstate;

	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
	while (single_task_running()) {
		mutex_lock(&flist->lock);
		if (list_empty(&flist->list)) {
			mutex_unlock(&flist->lock);
			return;
		}
		cstate = list_entry(flist->list.next,
				struct mcryptd_alg_cstate, flush_list);
		if (!cstate->flusher_engaged) {
			mutex_unlock(&flist->lock);
			return;
		}
		list_del(&cstate->flush_list);
		cstate->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		cstate->alg_state->flusher(cstate);
	}
}

/*
 * Called in workqueue context: do one item of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	/*
	 * Need to loop through more than once for multi-buffer
	 * to be effective.
	 */

	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {
		/*
		 * preempt_disable/enable is used to prevent
		 * being preempted by mcryptd_enqueue_request()
		 */
		local_bh_disable();
		preempt_disable();
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		preempt_enable();
		local_bh_enable();

		if (!req) {
			mcryptd_opportunistic_flush();
			return;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		req->complete(req, 0);
		if (!cpu_queue->queue.qlen)
			return;
		++i;
	}
	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

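/*
 * Delayed-work callback armed by mcryptd_arm_flusher(): take the per-CPU
 * algorithm state off the flush list and invoke the algorithm's flusher
 * to complete any outstanding partial jobs.
 */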
void mcryptd_flusher(struct work_struct *__work)
{
	struct mcryptd_alg_cstate *alg_cpu_state;
	struct mcryptd_alg_state *alg_state;
	struct mcryptd_flush_list *flist;
	int cpu;

	cpu = smp_processor_id();
	alg_cpu_state = container_of(to_delayed_work(__work),
				     struct mcryptd_alg_cstate, flush);
	alg_state = alg_cpu_state->alg_state;
	if (alg_cpu_state->cpu != cpu)
		pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
			 cpu, alg_cpu_state->cpu);

	if (alg_cpu_state->flusher_engaged) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		mutex_lock(&flist->lock);
		list_del(&alg_cpu_state->flush_list);
		alg_cpu_state->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		alg_state->flusher(alg_cpu_state);
	}
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mcryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int mcryptd_hash_setkey(struct crypto_ahash *parent,
			       const u8 *key, unsigned int keylen)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
			       CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
			       CRYPTO_TFM_RES_MASK);
	return err;
}

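/*
 * Stash the caller's completion callback in the request context,
 * substitute the daemon's own callback, and queue the request on this
 * CPU's mcryptd queue; the original callback is restored before the
 * request completes.
 */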
static int mcryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	int ret;

	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mcryptd_queue *queue =
		mcryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	ret = mcryptd_enqueue_request(queue, &req->base, rctx);

	return ret;
}

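/*
 * The mcryptd_hash_* callbacks below run from mcryptd_queue_worker(). An
 * err of -EINPROGRESS means the request is only being moved off the
 * backlog, so the original completion is invoked with that status and no
 * crypto work is done yet.
 */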
static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_update(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_final(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_finup(req, &rctx->desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* check this again */

	err = shash_ahash_mcryptd_digest(req, desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

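/*
 * Build an asynchronous "mcryptd(...)" ahash instance around a synchronous
 * shash algorithm. The instance inherits the underlying algorithm's
 * blocksize and alignmask, runs at its priority plus 50, and routes all
 * operations through the per-CPU mcryptd queue.
 */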
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			       struct mcryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
	inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
				      sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

	inst->alg.init = mcryptd_hash_init_enqueue;
	inst->alg.update = mcryptd_hash_update_enqueue;
	inst->alg.final = mcryptd_hash_final_enqueue;
	inst->alg.finup = mcryptd_hash_finup_enqueue;
	inst->alg.export = mcryptd_hash_export;
	inst->alg.import = mcryptd_hash_import;
	inst->alg.setkey = mcryptd_hash_setkey;
	inst->alg.digest = mcryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_DIGEST:
		return mcryptd_create_hash(tmpl, tb, &mqueue);
	}

	return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
	struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template mcryptd_tmpl = {
	.name = "mcryptd",
	.create = mcryptd_create,
	.free = mcryptd_free,
	.module = THIS_MODULE,
};

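/**
 * mcryptd_alloc_ahash - allocate an mcryptd-wrapped ahash
 * @alg_name: name of the underlying algorithm, wrapped as "mcryptd(alg_name)"
 * @type: crypto type flags
 * @mask: crypto mask flags
 *
 * Returns a handle whose requests are serviced by the mcryptd daemon, or an
 * ERR_PTR. Fails if the wrapped name would exceed CRYPTO_MAX_ALG_NAME or if
 * the resulting tfm is not provided by this module.
 *
 * Minimal usage sketch (the inner algorithm name is illustrative; a real
 * caller passes the name of its multi-buffer implementation):
 *
 *	struct mcryptd_ahash *mtfm;
 *
 *	mtfm = mcryptd_alloc_ahash("__sha1-mb", CRYPTO_ALG_INTERNAL,
 *				   CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(mtfm))
 *		return PTR_ERR(mtfm);
 *	...
 *	mcryptd_free_ahash(mtfm);
 */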
struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
					  u32 type, u32 mask)
{
	char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

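/*
 * The shash_ahash_mcryptd_* helpers below drive a multi-buffer shash
 * through the ahash interface. Unlike the regular shash_ahash_* helpers
 * they pass no data pointer (NULL, 0); the multi-buffer algorithm is
 * expected to pick the data up from the ahash_request associated with
 * the descriptor.
 */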
int shash_ahash_mcryptd_digest(struct ahash_request *req,
			       struct shash_desc *desc)
{
	int err;

	err = crypto_shash_init(desc) ?:
	      shash_ahash_mcryptd_finup(req, desc);

	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest);

int shash_ahash_mcryptd_update(struct ahash_request *req,
			       struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by the multi-buffer crypto algorithm if needed */

	return shash->update(desc, NULL, 0);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update);

int shash_ahash_mcryptd_finup(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by the multi-buffer crypto algorithm if needed */

	return shash->finup(desc, NULL, 0, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup);

int shash_ahash_mcryptd_final(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by the multi-buffer crypto algorithm if needed */

	return shash->final(desc, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final);

struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct shash_desc *mcryptd_shash_desc(struct ahash_request *req)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(mcryptd_shash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);

static int __init mcryptd_init(void)
{
	int err, cpu;
	struct mcryptd_flush_list *flist;

	mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
	if (!mcryptd_flist)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		INIT_LIST_HEAD(&flist->list);
		mutex_init(&flist->lock);
	}

	err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
	if (err) {
		free_percpu(mcryptd_flist);
		return err;
	}

	err = crypto_register_template(&mcryptd_tmpl);
	if (err) {
		mcryptd_fini_queue(&mqueue);
		free_percpu(mcryptd_flist);
	}

	return err;
}

static void __exit mcryptd_exit(void)
{
	mcryptd_fini_queue(&mqueue);
	crypto_unregister_template(&mcryptd_tmpl);
	free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");