/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

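/*
 * cryptd keeps one request queue and one work item per possible CPU.
 * Requests are queued on the CPU that submits them and are then run by
 * the kcrypto_wq workqueue on that same CPU, keeping completion
 * callbacks local to the submitter.
 */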
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

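/*
 * Queue a request on the current CPU's queue and kick that CPU's work
 * item.  get_cpu()/put_cpu() disable preemption so the work is queued
 * on the same CPU whose queue just received the request.
 */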
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/* Called in workqueue context; performs one real crypto operation
 * (via req->complete) and reschedules itself if there is more work
 * to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/* Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable is used to prevent being
	 * preempted by cryptd_enqueue_request(). */
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

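/*
 * All of the *_enqueue() helpers below share one pattern: the caller's
 * completion callback is saved in the request context and replaced with
 * a cryptd handler.  When the workqueue later "completes" the request,
 * that handler performs the real operation synchronously, restores the
 * original callback and invokes it with softirqs disabled.
 */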
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

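/*
 * Allocate a zeroed instance with @head bytes in front of the
 * crypto_instance (used by callers such as cryptd_create_hash() that
 * embed it in a larger structure, e.g. an ahash_instance) and @tail
 * bytes after it for the template context.  The returned pointer is
 * the start of the whole allocation, not the crypto_instance itself.
 */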
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;

	rctx = aead_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);
	req->base.complete = rctx->complete;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
	ctx->child = cipher;
	tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = alg->cra_type;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
	inst->alg.cra_init = cryptd_aead_init_tfm;
	inst->alg.cra_exit = cryptd_aead_exit_tfm;
	inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
	inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
	inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
	inst->alg.cra_aead.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.cra_aead.decrypt = cryptd_aead_decrypt_enqueue;
	inst->alg.cra_aead.givencrypt = alg->cra_aead.givencrypt;
	inst->alg.cra_aead.givdecrypt = alg->cra_aead.givdecrypt;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->aead_spawn.base);
out_free_inst:
		kfree(inst);
	}
out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}
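
/*
 * Instantiation is driven by name lookup: asking the crypto API for
 * e.g. "cryptd(cbc(aes))" invokes cryptd_create() above, which
 * dispatches on the type of the wrapped algorithm to build a
 * blkcipher, hash or AEAD instance around it.
 */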

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_spawn(&aead_ctx->aead_spawn.base);
		kfree(inst);
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
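
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 *
 *	struct cryptd_ablkcipher *tfm;
 *
 *	tfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	cryptd_free_ablkcipher(tfm);
 *
 * This looks up "cryptd(cbc(aes))", so requests run the synchronous
 * "cbc(aes)" implementation from the kcrypto_wq workqueue; the child
 * transform is reachable via cryptd_ablkcipher_child() below.
 */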

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
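
/*
 * As with the ablkcipher helper above, cryptd_alloc_ahash("sha1", 0, 0)
 * would (illustratively) return the async "cryptd(sha1)" wrapper;
 * cryptd_ahash_child() then exposes the underlying shash for callers
 * that want to hash synchronously where that is safe, and
 * cryptd_shash_desc() exposes the per-request descriptor state.
 */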

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}
	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
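
/*
 * The AEAD helpers follow the same pattern: for example (illustrative),
 * cryptd_alloc_aead("gcm(aes)", 0, 0) yields an async "cryptd(gcm(aes))"
 * transform whose synchronous child is returned by cryptd_aead_child().
 */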

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;

	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");