/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 1000

struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
        struct crypto_skcipher_spawn spawn;
        struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
        struct crypto_shash_spawn spawn;
        struct cryptd_queue *queue;
};

struct aead_instance_ctx {
        struct crypto_aead_spawn aead_spawn;
        struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
        atomic_t refcnt;
        struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_skcipher_ctx {
        atomic_t refcnt;
        struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_hash_ctx {
        atomic_t refcnt;
        struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
        struct shash_desc desc;
};

struct cryptd_aead_ctx {
        atomic_t refcnt;
        struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
        crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}

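/*
 * The cryptd tfm contexts defined above all start with an atomic_t refcnt.
 * The refcnt only becomes active for tfms obtained through cryptd_alloc_*()
 * (which set it to 1): in that case every queued request takes an extra
 * reference that is dropped when the request completes, so the tfm cannot
 * be freed while work is still pending on the queue.
 */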
static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int cpu, err;
        struct cryptd_cpu_queue *cpu_queue;
        atomic_t *refcnt;
        bool may_backlog;

        cpu = get_cpu();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);

        refcnt = crypto_tfm_ctx(request->tfm);
        may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;

        if (err == -EBUSY && !may_backlog)
                goto out_put_cpu;

        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

        if (!atomic_read(refcnt))
                goto out_put_cpu;

        atomic_inc(refcnt);

out_put_cpu:
        put_cpu();

        return err;
}

/* Called in workqueue context; do one piece of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /*
         * Only handle one request at a time to avoid hogging crypto workqueue.
         * preempt_disable/enable is used to prevent being preempted by
         * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
         * cryptd_enqueue_request() being accessed from software interrupts.
         */
        local_bh_disable();
        preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        preempt_enable();
        local_bh_enable();

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        return ictx->queue;
}

static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
                                         u32 *mask)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return;

        *type |= algt->type & CRYPTO_ALG_INTERNAL;
        *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
                                   const u8 *key, unsigned int keylen)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
        struct crypto_blkcipher *child = ctx->child;
        int err;

        crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
                                          CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(child, key, keylen);
        crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
                                            CRYPTO_TFM_RES_MASK);
        return err;
}

static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
                                   struct crypto_blkcipher *child,
                                   int err,
                                   int (*crypt)(struct blkcipher_desc *desc,
                                                struct scatterlist *dst,
                                                struct scatterlist *src,
                                                unsigned int len))
{
        struct cryptd_blkcipher_request_ctx *rctx;
        struct cryptd_blkcipher_ctx *ctx;
        struct crypto_ablkcipher *tfm;
        struct blkcipher_desc desc;
        int refcnt;

        rctx = ablkcipher_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.info = req->info;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypt(&desc, req->dst, req->src, req->nbytes);

        req->base.complete = rctx->complete;

out:
        tfm = crypto_ablkcipher_reqtfm(req);
        ctx = crypto_ablkcipher_ctx(tfm);
        refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_ablkcipher(tfm);
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
                                    crypto_completion_t compl)
{
        struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_spawn *spawn = &ictx->spawn;
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *cipher;

        cipher = crypto_spawn_blkcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_ablkcipher.reqsize =
                sizeof(struct cryptd_blkcipher_request_ctx);
        return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(ctx->child);
}

static int cryptd_init_instance(struct crypto_instance *inst,
                                struct crypto_alg *alg)
{
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)",
                     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                return -ENAMETOOLONG;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

        return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                   unsigned int tail)
{
        char *p;
        struct crypto_instance *inst;
        int err;

        p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        inst = (void *)(p + head);

        err = cryptd_init_instance(inst, alg);
        if (err)
                goto out_free_inst;

out:
        return p;

out_free_inst:
        kfree(p);
        p = ERR_PTR(err);
        goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
                                   struct rtattr **tb,
                                   struct cryptd_queue *queue)
{
        struct cryptd_instance_ctx *ctx;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
        u32 mask = CRYPTO_ALG_TYPE_MASK;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        alg = crypto_get_attr_alg(tb, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = crypto_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_spawn(&ctx->spawn, alg, inst,
                                CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
        if (err)
                goto out_free_inst;

        type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
        if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
                type |= CRYPTO_ALG_INTERNAL;
        inst->alg.cra_flags = type;
        inst->alg.cra_type = &crypto_ablkcipher_type;

        inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
        inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
        inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

        inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

        inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

        inst->alg.cra_init = cryptd_blkcipher_init_tfm;
        inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

        inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
        inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
        inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_spawn(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
                                  const u8 *key, unsigned int keylen)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
        struct crypto_skcipher *child = ctx->child;
        int err;

        crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
                                         CRYPTO_TFM_REQ_MASK);
        err = crypto_skcipher_setkey(child, key, keylen);
        crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
                                          CRYPTO_TFM_RES_MASK);
        return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        int refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *child = ctx->child;
        SKCIPHER_REQUEST_ON_STACK(subreq, child);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *child = ctx->child;
        SKCIPHER_REQUEST_ON_STACK(subreq, child);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_decrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
                                   crypto_completion_t compl)
{
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
        struct crypto_skcipher_spawn *spawn = &ictx->spawn;
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *cipher;

        cipher = crypto_spawn_skcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_skcipher_set_reqsize(
                tfm, sizeof(struct cryptd_skcipher_request_ctx));
        return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
        struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

        crypto_drop_skcipher(&ctx->spawn);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
                                  struct rtattr **tb,
                                  struct cryptd_queue *queue)
{
        struct skcipherd_instance_ctx *ctx;
        struct skcipher_instance *inst;
        struct skcipher_alg *alg;
        const char *name;
        u32 type;
        u32 mask;
        int err;

        type = 0;
        mask = CRYPTO_ALG_ASYNC;

        cryptd_check_internal(tb, &type, &mask);

        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return PTR_ERR(name);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = skcipher_instance_ctx(inst);
        ctx->queue = queue;

        crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
        err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
        if (err)
                goto out_free_inst;

        alg = crypto_spawn_skcipher_alg(&ctx->spawn);
        err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
        if (err)
                goto out_drop_skcipher;

        inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
                                   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

        inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
        inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
        inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
        inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

        inst->alg.init = cryptd_skcipher_init_tfm;
        inst->alg.exit = cryptd_skcipher_exit_tfm;

        inst->alg.setkey = cryptd_skcipher_setkey;
        inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
        inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

        inst->free = cryptd_skcipher_free;

        err = skcipher_register_instance(tmpl, inst);
        if (err) {
out_drop_skcipher:
                crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }
        return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_shash_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *hash;

        hash = crypto_spawn_shash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct cryptd_hash_request_ctx) +
                                 crypto_shash_descsize(hash));
        return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_shash *child = ctx->child;
        int err;

        crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
                                      CRYPTO_TFM_REQ_MASK);
        err = crypto_shash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
                                       CRYPTO_TFM_RES_MASK);
        return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        int refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_shash_init(desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_update(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = crypto_shash_final(&rctx->desc, req->result);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_finup(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = shash_ahash_digest(req, desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_shash_export(&rctx->desc, out);
}

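/*
 * Unlike the other hash ops, import is not bounced through the cryptd
 * queue, so the child shash_desc embedded in the request context may not
 * have been set up by cryptd_hash_init() yet; it must be initialized here
 * before the state is imported.
 */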
static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct shash_desc *desc = cryptd_shash_desc(req);

        desc->tfm = ctx->child;
        desc->flags = req->base.flags;

        return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct shash_alg *salg;
        struct crypto_alg *alg;
        u32 type = 0;
        u32 mask = 0;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        salg = shash_attr_alg(tb[1], type, mask);
        if (IS_ERR(salg))
                return PTR_ERR(salg);

        alg = &salg->base;
        inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
                                     sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_shash_spawn(&ctx->spawn, salg,
                                      ahash_crypto_instance(inst));
        if (err)
                goto out_free_inst;

        type = CRYPTO_ALG_ASYNC;
        if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
                type |= CRYPTO_ALG_INTERNAL;
        inst->alg.halg.base.cra_flags = type;

        inst->alg.halg.digestsize = salg->digestsize;
        inst->alg.halg.statesize = salg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.init = cryptd_hash_init_enqueue;
        inst->alg.update = cryptd_hash_update_enqueue;
        inst->alg.final = cryptd_hash_final_enqueue;
        inst->alg.finup = cryptd_hash_finup_enqueue;
        inst->alg.export = cryptd_hash_export;
        inst->alg.import = cryptd_hash_import;
        inst->alg.setkey = cryptd_hash_setkey;
        inst->alg.digest = cryptd_hash_digest_enqueue;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_shash(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
                                   unsigned int authsize)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
                              struct crypto_aead *child,
                              int err,
                              int (*crypt)(struct aead_request *req))
{
        struct cryptd_aead_request_ctx *rctx;
        struct cryptd_aead_ctx *ctx;
        crypto_completion_t compl;
        struct crypto_aead *tfm;
        int refcnt;

        rctx = aead_request_ctx(req);
        compl = rctx->complete;

        tfm = crypto_aead_reqtfm(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;
        aead_request_set_tfm(req, child);
        err = crypt(req);

out:
        ctx = crypto_aead_ctx(tfm);
        refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        compl(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;
        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
        struct aead_instance *inst = aead_alg_instance(tfm);
        struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
        struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_aead *cipher;

        cipher = crypto_spawn_aead(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_aead_set_reqsize(
                tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
                         crypto_aead_reqsize(cipher)));
        return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
        crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
                              struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct aead_instance_ctx *ctx;
        struct aead_instance *inst;
        struct aead_alg *alg;
        const char *name;
        u32 type = 0;
        u32 mask = CRYPTO_ALG_ASYNC;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return PTR_ERR(name);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = aead_instance_ctx(inst);
        ctx->queue = queue;

        crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
        err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
        if (err)
                goto out_free_inst;

        alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
        err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
        if (err)
                goto out_drop_aead;

        inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
                                   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

        inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
        inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

        inst->alg.init = cryptd_aead_init_tfm;
        inst->alg.exit = cryptd_aead_exit_tfm;
        inst->alg.setkey = cryptd_aead_setkey;
        inst->alg.setauthsize = cryptd_aead_setauthsize;
        inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
        inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

        err = aead_register_instance(tmpl, inst);
        if (err) {
out_drop_aead:
                crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
                kfree(inst);
        }
        return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_BLKCIPHER:
                if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
                    CRYPTO_ALG_TYPE_BLKCIPHER)
                        return cryptd_create_blkcipher(tmpl, tb, &queue);

                return cryptd_create_skcipher(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_DIGEST:
                return cryptd_create_hash(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_AEAD:
                return cryptd_create_aead(tmpl, tb, &queue);
        }

        return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
        struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
        struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
        struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

        switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_drop_shash(&hctx->spawn);
                kfree(ahash_instance(inst));
                return;
        case CRYPTO_ALG_TYPE_AEAD:
                crypto_drop_aead(&aead_ctx->aead_spawn);
                kfree(aead_instance(inst));
                return;
        default:
                crypto_drop_spawn(&ctx->spawn);
                kfree(inst);
        }
}

static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .create = cryptd_create,
        .free = cryptd_free,
        .module = THIS_MODULE,
};

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_blkcipher_ctx *ctx;
        struct crypto_tfm *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        type = crypto_skcipher_type(type);
        mask &= ~CRYPTO_ALG_TYPE_MASK;
        mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
        tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_tfm(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_tfm_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_skcipher_ctx *ctx;
        struct crypto_skcipher *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);

        tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);

        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_skcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_skcipher_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
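/*
 * Illustrative sketch only (not built as part of this file): how a caller
 * such as a SIMD cipher wrapper might use the skcipher helpers above.  The
 * algorithm name "__xts(aes)", the flag choices and the key/keylen
 * variables are assumptions made for the example.
 *
 *      struct cryptd_skcipher *ctfm;
 *      int err;
 *
 *      ctfm = cryptd_alloc_skcipher("__xts(aes)", CRYPTO_ALG_INTERNAL,
 *                                   CRYPTO_ALG_INTERNAL);
 *      if (IS_ERR(ctfm))
 *              return PTR_ERR(ctfm);
 *
 *      err = crypto_skcipher_setkey(&ctfm->base, key, keylen);
 *
 * Requests are then issued against &ctfm->base and run from the cryptd
 * workqueue; callers that may sleep can instead operate directly on
 * cryptd_skcipher_child(ctfm).  cryptd_skcipher_queued(ctfm) reports
 * whether requests are still pending, and cryptd_free_skcipher(ctfm)
 * drops the reference taken at allocation time.
 */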

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
                                        u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_hash_ctx *ctx;
        struct crypto_ahash *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_ahash_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);
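/*
 * Illustrative sketch only (not built as part of this file): typical use of
 * the ahash helpers above.  The algorithm name "__ghash" is an assumption
 * made for the example.
 *
 *      struct cryptd_ahash *chash;
 *
 *      chash = cryptd_alloc_ahash("__ghash", CRYPTO_ALG_INTERNAL,
 *                                 CRYPTO_ALG_INTERNAL);
 *      if (IS_ERR(chash))
 *              return PTR_ERR(chash);
 *
 * Asynchronous requests are issued against &chash->base; a caller that
 * completes a request synchronously can use cryptd_ahash_child() and
 * cryptd_shash_desc() to drive the underlying shash directly.
 * cryptd_ahash_queued() and cryptd_free_ahash() manage the reference taken
 * by cryptd_alloc_ahash().
 */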

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
                                      u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_aead_ctx *ctx;
        struct crypto_aead *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_aead(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_aead_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx;
        ctx = crypto_aead_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
        int err;

        err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
        if (err)
                return err;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                cryptd_fini_queue(&queue);

        return err;
}

static void __exit cryptd_exit(void)
{
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");