/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

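/*
 * Request queueing is per-CPU: each possible CPU gets its own
 * crypto_queue and work item, so a request is queued and later
 * executed on the CPU that submitted it, without cross-CPU locking
 * on the fast path.
 */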
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

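/*
 * Queue a request on the current CPU's queue and kick that CPU's work
 * item.  The get_cpu()/put_cpu() pair keeps the task from migrating
 * between picking the per-CPU queue and scheduling the work on it.
 */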
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/*
 * Called in workqueue context; perform one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable()/preempt_enable() are used to
	 * prevent being preempted by cryptd_enqueue_request().
	 */
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

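/*
 * Run the synchronous blkcipher operation in workqueue context, then
 * restore the caller's completion function and invoke it.  Bottom
 * halves are disabled around the completion call so that it runs in
 * an atomic, softirq-like context, which is presumably what async
 * completion handlers are written to expect.
 */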
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

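/*
 * Allocate a template instance whose driver name is "cryptd(<driver>)".
 * The priority is bumped by 50 so that the instantiated async version
 * wins over the underlying synchronous algorithm in lookups.
 */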
static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
						     unsigned int tail)
{
	struct crypto_instance *inst;
	int err;

	inst = kzalloc(sizeof(*inst) + tail, GFP_KERNEL);
	if (!inst) {
		inst = ERR_PTR(-ENOMEM);
		goto out;
	}

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}

static struct crypto_instance *cryptd_alloc_blkcipher(
	struct rtattr **tb, struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = cryptd_alloc_instance(alg, sizeof(*ctx));
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

out_put_alg:
	crypto_mod_put(alg);
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out_put_alg;
}

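/*
 * The request context embeds the child shash descriptor, so the size
 * announced via crypto_ahash_set_reqsize() must cover both
 * cryptd_hash_request_ctx and the child's descsize.
 */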
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

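/*
 * Common enqueue path for all hash operations: stash the caller's
 * completion function in the request context, substitute the cryptd
 * worker for it, and queue the request.  The worker does the real
 * work later and restores the original completion before calling it.
 */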
static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static struct crypto_instance *cryptd_alloc_hash(
	struct rtattr **tb, struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return ERR_CAST(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, sizeof(*ctx));
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg, inst);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ahash_type;

	inst->alg.cra_ahash.digestsize = salg->digestsize;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.cra_init = cryptd_hash_init_tfm;
	inst->alg.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
	inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
	inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
	inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
	inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;

out_put_alg:
	crypto_mod_put(alg);
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out_put_alg;
}

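/* A single per-CPU queue, shared by every cryptd template instance. */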
static struct cryptd_queue queue;

static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_alloc_blkcipher(tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_alloc_hash(tb, &queue);
	}

	return ERR_PTR(-EINVAL);
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);

	crypto_drop_spawn(&ctx->spawn);
	kfree(inst);
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.alloc = cryptd_alloc,
	.free = cryptd_free,
	.module = THIS_MODULE,
};
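
/*
 * Illustrative only (not part of the original file): a minimal sketch,
 * kept inside #if 0 so it is never built, of how a user could
 * instantiate the template registered above through the regular crypto
 * API.  The function name and the choice of sha1 are hypothetical.
 */
#if 0
static int example_alloc_cryptd_hash(void)
{
	struct crypto_ahash *tfm;

	/* First use triggers cryptd_alloc() -> cryptd_alloc_hash(). */
	tfm = crypto_alloc_ahash("cryptd(sha1)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... submit ahash_requests; they complete asynchronously ... */

	crypto_free_ahash(tfm);
	return 0;
}
#endif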

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
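
/*
 * Illustrative only (not part of the original file): a minimal sketch
 * of the exported helpers above, kept inside #if 0 so it is never
 * built.  The function name and the choice of cbc(aes) are
 * hypothetical.
 */
#if 0
static int example_alloc_cryptd_ablkcipher(void)
{
	struct cryptd_ablkcipher *tfm;
	struct crypto_blkcipher *child;

	tfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Synchronous access to the underlying blkcipher, if needed. */
	child = cryptd_ablkcipher_child(tfm);
	(void)child;

	cryptd_free_ablkcipher(tfm);
	return 0;
}
#endif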

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");