/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from the crypto daemon (cryptd).
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

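/*
 * MCRYPTD_MAX_CPU_QLEN bounds how many requests may sit in a per-CPU
 * queue; MCRYPTD_BATCH bounds how many requests the worker processes
 * per invocation before rescheduling itself (see mcryptd_queue_worker()).
 */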
#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail);

struct mcryptd_flush_list {
	struct list_head list;
	struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
	struct crypto_ahash_spawn spawn;
	struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

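/*
 * Put @cstate on this CPU's flush list and schedule its delayed flusher,
 * unless one is already pending.  The caller is expected to be running on
 * the CPU that owns @cstate, since smp_processor_id() is used to pick
 * both the flush list and the workqueue CPU.
 */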
void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
	struct mcryptd_flush_list *flist;

	if (!cstate->flusher_engaged) {
		/* put the flusher on the flush list */
		flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
		mutex_lock(&flist->lock);
		list_add_tail(&cstate->flush_list, &flist->list);
		cstate->flusher_engaged = true;
		cstate->next_flush = jiffies + delay;
		queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
				      &cstate->flush, delay);
		mutex_unlock(&flist->lock);
	}
}
EXPORT_SYMBOL(mcryptd_arm_flusher);

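/*
 * Allocate and initialize one request queue per possible CPU, each with
 * its own worker.  mcryptd_fini_queue() below is the teardown counterpart.
 */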
static int mcryptd_init_queue(struct mcryptd_queue *queue,
			      unsigned int max_cpu_qlen)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
	pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
	}
	return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

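/*
 * Queue @request on the current CPU's queue and kick that CPU's worker.
 * The CPU is recorded in the request context tag so later processing
 * stays on the same CPU.
 */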
static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				   struct crypto_async_request *request,
				   struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	rctx->tag.cpu = cpu;

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/*
 * Try to opportunistically flush partially completed jobs if the
 * crypto daemon is the only task running on this CPU.
 */
static void mcryptd_opportunistic_flush(void)
{
	struct mcryptd_flush_list *flist;
	struct mcryptd_alg_cstate *cstate;

	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
	while (single_task_running()) {
		mutex_lock(&flist->lock);
		cstate = list_first_entry_or_null(&flist->list,
				struct mcryptd_alg_cstate, flush_list);
		if (!cstate || !cstate->flusher_engaged) {
			mutex_unlock(&flist->lock);
			return;
		}
		list_del(&cstate->flush_list);
		cstate->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		cstate->alg_state->flusher(cstate);
	}
}

/*
 * Called in workqueue context: process queued requests (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	/*
	 * Need to loop through more than once for multi-buffer to
	 * be effective.
	 */

	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {
		/*
		 * preempt_disable/enable is used to prevent
		 * being preempted by mcryptd_enqueue_request()
		 */
		local_bh_disable();
		preempt_disable();
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		preempt_enable();
		local_bh_enable();

		if (!req) {
			mcryptd_opportunistic_flush();
			return;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		req->complete(req, 0);
		if (!cpu_queue->queue.qlen)
			return;
		++i;
	}
	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

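/*
 * Delayed-work handler: when a flush timer fires, take the per-CPU
 * algorithm state off the flush list and run its flusher to complete
 * partially finished jobs.
 */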
void mcryptd_flusher(struct work_struct *__work)
{
	struct mcryptd_alg_cstate *alg_cpu_state;
	struct mcryptd_alg_state *alg_state;
	struct mcryptd_flush_list *flist;
	int cpu;

	cpu = smp_processor_id();
	alg_cpu_state = container_of(to_delayed_work(__work),
				     struct mcryptd_alg_cstate, flush);
	alg_state = alg_cpu_state->alg_state;
	if (alg_cpu_state->cpu != cpu)
		pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
			 cpu, alg_cpu_state->cpu);

	if (alg_cpu_state->flusher_engaged) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		mutex_lock(&flist->lock);
		list_del(&alg_cpu_state->flush_list);
		alg_cpu_state->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		alg_state->flusher(alg_cpu_state);
	}
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

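/*
 * Allocate a crypto instance with @head bytes in front of it and @tail
 * bytes of context behind it, deriving the instance names from @alg.
 * The +50 priority bump makes the mcryptd instance preferred over the
 * algorithm it wraps.
 */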
static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

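/*
 * Propagate the CRYPTO_ALG_INTERNAL bit from the template attributes into
 * @type and @mask.  mcryptd only wraps internal (multi-buffer) algorithms,
 * so callers treat a false return as -EINVAL.
 */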
static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
					  u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return false;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;

	if (*type & *mask & CRYPTO_ALG_INTERNAL)
		return true;
	else
		return false;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_ahash_spawn *spawn = &ictx->spawn;
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *hash;

	hash = crypto_spawn_ahash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mcryptd_hash_request_ctx) +
				 crypto_ahash_reqsize(hash));
	return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(ctx->child);
}

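/*
 * Forward setkey to the child hash: copy the request flags from the
 * parent in, and the result flags from the child back out.
 */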
static int mcryptd_hash_setkey(struct crypto_ahash *parent,
			       const u8 *key, unsigned int keylen)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_ahash *child = ctx->child;
	int err;

	crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
			       CRYPTO_TFM_REQ_MASK);
	err = crypto_ahash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
			       CRYPTO_TFM_RES_MASK);
	return err;
}

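/*
 * Save the caller's completion, substitute @complete (which runs in the
 * daemon's workqueue context), and queue the request on this CPU.
 */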
static int mcryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	int ret;

	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mcryptd_queue *queue =
		mcryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	ret = mcryptd_enqueue_request(queue, &req->base, rctx);

	return ret;
}

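/*
 * The mcryptd_hash_{init,update,final,finup,digest} callbacks below all
 * follow the same pattern: they run in workqueue context as the
 * substituted req->base.complete, forward the operation to the child
 * request, and invoke the caller's original completion under
 * local_bh_disable().  err == -EINPROGRESS is only a backlog
 * notification and is passed straight through.
 */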
static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_ahash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct ahash_request *desc = &rctx->areq;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	ahash_request_set_tfm(desc, child);
	ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req_async);

	rctx->out = req->result;
	err = crypto_ahash_init(desc);

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	rctx->out = req->result;
	err = ahash_mcryptd_update(&rctx->areq);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	rctx->out = req->result;
	err = ahash_mcryptd_final(&rctx->areq);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	rctx->out = req->result;
	err = ahash_mcryptd_finup(&rctx->areq);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_ahash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct ahash_request *desc = &rctx->areq;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	ahash_request_set_tfm(desc, child);
	ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req_async);

	rctx->out = req->result;
	err = ahash_mcryptd_digest(desc);

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_ahash_export(&rctx->areq, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_ahash_import(&rctx->areq, in);
}

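/*
 * Build and register an mcryptd ahash instance around the internal hash
 * algorithm named by the template parameters, wiring its operations to
 * the enqueue helpers above.
 */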
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			       struct mcryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct hash_alg_common *halg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	if (!mcryptd_check_internal(tb, &type, &mask))
		return -EINVAL;

	halg = ahash_attr_alg(tb[1], type, mask);
	if (IS_ERR(halg))
		return PTR_ERR(halg);

	alg = &halg->base;
	pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
	inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
				      sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_ahash_spawn(&ctx->spawn, halg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.halg.base.cra_flags = type;

	inst->alg.halg.digestsize = halg->digestsize;
	inst->alg.halg.statesize = halg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

	inst->alg.init = mcryptd_hash_init_enqueue;
	inst->alg.update = mcryptd_hash_update_enqueue;
	inst->alg.final = mcryptd_hash_final_enqueue;
	inst->alg.finup = mcryptd_hash_finup_enqueue;
	inst->alg.export = mcryptd_hash_export;
	inst->alg.import = mcryptd_hash_import;
	inst->alg.setkey = mcryptd_hash_setkey;
	inst->alg.digest = mcryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_ahash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct mcryptd_queue mqueue;

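/*
 * Template entry point.  Only hash algorithms are supported; anything
 * else is rejected with -EINVAL.
 */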
static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_DIGEST:
		return mcryptd_create_hash(tmpl, tb, &mqueue);
	}

	return -EINVAL;
}

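/*
 * Tear down an instance created above.  Only ahash instances are created
 * by mcryptd_create(), so the default branch is defensive.
 */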
static void mcryptd_free(struct crypto_instance *inst)
{
	struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_ahash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template mcryptd_tmpl = {
	.name = "mcryptd",
	.create = mcryptd_create,
	.free = mcryptd_free,
	.module = THIS_MODULE,
};

struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
					  u32 type, u32 mask)
{
	char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

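/*
 * Illustrative use of mcryptd_alloc_ahash() by a wrapping driver, modeled
 * on the sha*_mb glue code; the algorithm name below is a sketch, not
 * something defined in this file:
 *
 *	struct mcryptd_ahash *mtfm;
 *
 *	mtfm = mcryptd_alloc_ahash("__intel_sha1-mb", CRYPTO_ALG_INTERNAL,
 *				   CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(mtfm))
 *		return PTR_ERR(mtfm);
 *	...
 *	mcryptd_free_ahash(mtfm);
 */
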
int ahash_mcryptd_digest(struct ahash_request *desc)
{
	return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
}

int ahash_mcryptd_update(struct ahash_request *desc)
{
	/* alignment is to be done by the multi-buffer crypto algorithm if needed */

	return crypto_ahash_update(desc);
}

int ahash_mcryptd_finup(struct ahash_request *desc)
{
	/* alignment is to be done by the multi-buffer crypto algorithm if needed */

	return crypto_ahash_finup(desc);
}

int ahash_mcryptd_final(struct ahash_request *desc)
{
	/* alignment is to be done by the multi-buffer crypto algorithm if needed */

	return crypto_ahash_final(desc);
}

struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->areq;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);

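/*
 * Module init: set up the per-CPU flush lists and the request queues
 * before registering the template; unwind in reverse order on failure.
 */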
static int __init mcryptd_init(void)
{
	int err, cpu;
	struct mcryptd_flush_list *flist;

	mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
	if (!mcryptd_flist)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		INIT_LIST_HEAD(&flist->list);
		mutex_init(&flist->lock);
	}

	err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
	if (err) {
		free_percpu(mcryptd_flist);
		return err;
	}

	err = crypto_register_template(&mcryptd_tmpl);
	if (err) {
		mcryptd_fini_queue(&mqueue);
		free_percpu(mcryptd_flist);
	}

	return err;
}

static void __exit mcryptd_exit(void)
{
	mcryptd_fini_queue(&mqueue);
	crypto_unregister_template(&mcryptd_tmpl);
	free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");