/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define CRYPTD_MAX_QLEN 100

struct cryptd_state {
	spinlock_t lock;
	struct mutex mutex;
	struct crypto_queue queue;
	struct task_struct *task;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_state *state;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_hash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
};

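/*
 * Walk from a transform to the cryptd_state of the instance that
 * created it.
 */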
static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->state;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_state *state =
		cryptd_get_state(crypto_ablkcipher_tfm(tfm));
	int err;

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	spin_lock_bh(&state->lock);
	err = ablkcipher_enqueue_request(&state->queue, req);
	spin_unlock_bh(&state->lock);

	wake_up_process(state->task);
	return err;
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct cryptd_state *state = cryptd_get_state(tfm);
	int active;

	mutex_lock(&state->mutex);
	active = ablkcipher_tfm_in_queue(&state->queue,
					 __crypto_ablkcipher_cast(tfm));
	mutex_unlock(&state->mutex);

	BUG_ON(active);

	crypto_free_blkcipher(ctx->child);
}

static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
						     struct cryptd_state *state)
{
	struct crypto_instance *inst;
	struct cryptd_instance_ctx *ctx;
	int err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst) {
		inst = ERR_PTR(-ENOMEM);
		goto out;
	}

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	ctx = crypto_instance_ctx(inst);
	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	ctx->state = state;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}

static struct crypto_instance *cryptd_alloc_blkcipher(
	struct rtattr **tb, struct cryptd_state *state)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = cryptd_alloc_instance(alg, state);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_hash *cipher;

	cipher = crypto_spawn_hash(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ahash.reqsize =
		sizeof(struct cryptd_hash_request_ctx);
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct cryptd_state *state = cryptd_get_state(tfm);
	int active;

	mutex_lock(&state->mutex);
	active = ahash_tfm_in_queue(&state->queue,
				    __crypto_ahash_cast(tfm));
	mutex_unlock(&state->mutex);

	BUG_ON(active);

	crypto_free_hash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_hash *child = ctx->child;
	int err;

	crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
				     CRYPTO_TFM_REQ_MASK);
	err = crypto_hash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_state *state =
		cryptd_get_state(crypto_ahash_tfm(tfm));
	int err;

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	spin_lock_bh(&state->lock);
	err = ahash_enqueue_request(&state->queue, req);
	spin_unlock_bh(&state->lock);

	wake_up_process(state->task);
	return err;
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->init(&desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->update(&desc, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->final(&desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->digest(&desc, req->src, req->nbytes,
					     req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static struct crypto_instance *cryptd_alloc_hash(
	struct rtattr **tb, struct cryptd_state *state)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
				  CRYPTO_ALG_TYPE_HASH_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = cryptd_alloc_instance(alg, state);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ahash_type;

	inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.cra_init = cryptd_hash_init_tfm;
	inst->alg.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
	inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
	inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
	inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
	inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static struct cryptd_state state;

static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_alloc_blkcipher(tb, &state);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_alloc_hash(tb, &state);
	}

	return ERR_PTR(-EINVAL);
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);

	crypto_drop_spawn(&ctx->spawn);
	kfree(inst);
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.alloc = cryptd_alloc,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

static inline int cryptd_create_thread(struct cryptd_state *state,
				       int (*fn)(void *data), const char *name)
{
	spin_lock_init(&state->lock);
	mutex_init(&state->mutex);
	crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);

	state->task = kthread_run(fn, state, name);
	if (IS_ERR(state->task))
		return PTR_ERR(state->task);

	return 0;
}

static inline void cryptd_stop_thread(struct cryptd_state *state)
{
	BUG_ON(state->queue.qlen);
	kthread_stop(state->task);
}

static int cryptd_thread(void *data)
{
	struct cryptd_state *state = data;
	int stop;

	current->flags |= PF_NOFREEZE;

	do {
		struct crypto_async_request *req, *backlog;

		mutex_lock(&state->mutex);
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_bh(&state->lock);
		backlog = crypto_get_backlog(&state->queue);
		req = crypto_dequeue_request(&state->queue);
		spin_unlock_bh(&state->lock);

		stop = kthread_should_stop();

		if (stop || req) {
			__set_current_state(TASK_RUNNING);
			if (req) {
				if (backlog)
					backlog->complete(backlog,
							  -EINPROGRESS);
				req->complete(req, 0);
			}
		}

		mutex_unlock(&state->mutex);

		schedule();
	} while (!stop);

	return 0;
}

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		kthread_stop(state.task);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_stop_thread(&state);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");