/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

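/*
 * Each possible CPU owns one request queue, drained by a per-CPU work
 * item; a request is queued on the CPU that submits it.
 */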
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

static void cryptd_queue_worker(struct work_struct *work);

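/*
 * Allocate the per-CPU queues and initialise each with the given
 * maximum backlog length and a work item running cryptd_queue_worker().
 */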
static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

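/*
 * Put a request on the current CPU's queue and schedule that CPU's
 * work item on kcrypto_wq; get_cpu()/put_cpu() keep the submitter on
 * one CPU so the request lands on the queue whose work was kicked.
 */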
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/* Called in workqueue context; performs one real crypto operation (via
 * req->complete) and reschedules itself if there is more work to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/* Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable is used to prevent
	 * being preempted by cryptd_enqueue_request(). */
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

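/*
 * Workqueue-context helper shared by encrypt and decrypt: run the
 * synchronous child blkcipher over the request, then call the
 * original completion with softirqs disabled.
 */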
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

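/*
 * Save the caller's completion, substitute the workqueue-context
 * handler and put the request on the per-CPU queue.
 */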
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

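/*
 * Common instance setup: allocate a crypto_instance with "tail" bytes
 * of private context, derive the "cryptd(...)" driver name and inherit
 * the underlying algorithm's basic properties, giving the wrapper a
 * slightly higher priority (+50) than the algorithm it wraps.
 */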
static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
						     unsigned int tail)
{
	struct crypto_instance *inst;
	int err;

	inst = kzalloc(sizeof(*inst) + tail, GFP_KERNEL);
	if (!inst) {
		inst = ERR_PTR(-ENOMEM);
		goto out;
	}

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}

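/*
 * Construct a "cryptd(...)" instance exposing a synchronous blkcipher
 * as an asynchronous ablkcipher whose operations are deferred to the
 * per-CPU queue.
 */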
static struct crypto_instance *cryptd_alloc_blkcipher(
	struct rtattr **tb, struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = cryptd_alloc_instance(alg, sizeof(*ctx));
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

out_put_alg:
	crypto_mod_put(alg);
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out_put_alg;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

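/*
 * Common enqueue path for all hash operations: save the caller's
 * completion, substitute the given workqueue-context handler and
 * queue the request on the per-CPU queue.
 */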
static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

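/*
 * The handlers below run in workqueue context.  Each drives one step
 * of the child shash (init/update/final/digest), restores the
 * caller's completion and invokes it with softirqs disabled.
 */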
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

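/*
 * Construct a "cryptd(...)" instance exposing a synchronous shash as
 * an asynchronous ahash backed by the per-CPU queue.
 */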
static struct crypto_instance *cryptd_alloc_hash(
	struct rtattr **tb, struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return ERR_CAST(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, sizeof(*ctx));
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg, inst);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ahash_type;

	inst->alg.cra_ahash.digestsize = salg->digestsize;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.cra_init = cryptd_hash_init_tfm;
	inst->alg.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
	inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
	inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
	inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
	inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;

out_put_alg:
	crypto_mod_put(alg);
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out_put_alg;
}

static struct cryptd_queue queue;

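/*
 * Template entry point: select the blkcipher or hash constructor
 * according to the algorithm type requested in the template
 * parameters.
 */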
static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_alloc_blkcipher(tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_alloc_hash(tb, &queue);
	}

	return ERR_PTR(-EINVAL);
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);

	crypto_drop_spawn(&ctx->spawn);
	kfree(inst);
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.alloc = cryptd_alloc,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

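/*
 * Allocate a cryptd-wrapped ablkcipher for a named algorithm.  For
 * example (the algorithm name here is purely illustrative),
 * cryptd_alloc_ablkcipher("cbc(aes)", 0, 0) looks up the transform
 * "cryptd(cbc(aes))".  The cra_module check below verifies that the
 * lookup really resolved to a cryptd instance and not some other
 * implementation registered under the same name.
 */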
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");