/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail);

struct mcryptd_flush_list {
	struct list_head list;
	struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

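/*
 * Put the per-cpu algorithm state on this cpu's flush list and schedule
 * the delayed flush work, unless a flusher is already pending for it.
 */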
void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
	struct mcryptd_flush_list *flist;

	if (!cstate->flusher_engaged) {
		/* put the flusher on the flush list */
		flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
		mutex_lock(&flist->lock);
		list_add_tail(&cstate->flush_list, &flist->list);
		cstate->flusher_engaged = true;
		cstate->next_flush = jiffies + delay;
		queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
				      &cstate->flush, delay);
		mutex_unlock(&flist->lock);
	}
}
EXPORT_SYMBOL(mcryptd_arm_flusher);

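/*
 * Set up one crypto request queue per possible cpu, each of depth
 * max_cpu_qlen and with its own lock and worker.
 */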
static int mcryptd_init_queue(struct mcryptd_queue *queue,
			      unsigned int max_cpu_qlen)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
	pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
		spin_lock_init(&cpu_queue->q_lock);
	}
	return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

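/*
 * Queue the request on the local cpu's queue under q_lock and kick that
 * cpu's worker.  The request context is tagged with the cpu it was
 * queued on.
 */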
static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				   struct crypto_async_request *request,
				   struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
	spin_lock(&cpu_queue->q_lock);
	cpu = smp_processor_id();
	rctx->tag.cpu = smp_processor_id();

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	spin_unlock(&cpu_queue->q_lock);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	return err;
}

/*
 * Try to opportunistically flush partially completed jobs if the
 * crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
	struct mcryptd_flush_list *flist;
	struct mcryptd_alg_cstate *cstate;

	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
	while (single_task_running()) {
		mutex_lock(&flist->lock);
		if (list_empty(&flist->list)) {
			mutex_unlock(&flist->lock);
			return;
		}
		cstate = list_entry(flist->list.next,
				    struct mcryptd_alg_cstate, flush_list);
		if (!cstate->flusher_engaged) {
			mutex_unlock(&flist->lock);
			return;
		}
		list_del(&cstate->flush_list);
		cstate->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		cstate->alg_state->flusher(cstate);
	}
}

/*
 * Called in workqueue context; performs one unit of real crypto work
 * (via req->complete) and reschedules itself if there is more work
 * to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	/*
	 * Need to loop through more than once for multi-buffer to
	 * be effective.
	 */

	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {

		spin_lock_bh(&cpu_queue->q_lock);
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		spin_unlock_bh(&cpu_queue->q_lock);

		if (!req) {
			mcryptd_opportunistic_flush();
			return;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		req->complete(req, 0);
		if (!cpu_queue->queue.qlen)
			return;
		++i;
	}
	if (cpu_queue->queue.qlen)
		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
}

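/*
 * Delayed work that flushes partially filled multi-buffer jobs for one
 * per-cpu algorithm state; the state is taken off the flush list before
 * the algorithm's flusher is called.
 */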
void mcryptd_flusher(struct work_struct *__work)
{
	struct mcryptd_alg_cstate *alg_cpu_state;
	struct mcryptd_alg_state *alg_state;
	struct mcryptd_flush_list *flist;
	int cpu;

	cpu = smp_processor_id();
	alg_cpu_state = container_of(to_delayed_work(__work),
				     struct mcryptd_alg_cstate, flush);
	alg_state = alg_cpu_state->alg_state;
	if (alg_cpu_state->cpu != cpu)
		pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
			 cpu, alg_cpu_state->cpu);

	if (alg_cpu_state->flusher_engaged) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		mutex_lock(&flist->lock);
		list_del(&alg_cpu_state->flush_list);
		alg_cpu_state->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		alg_state->flusher(alg_cpu_state);
	}
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

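/*
 * Allocate a crypto instance with 'head' bytes in front of it and
 * 'tail' bytes of context behind it, inheriting name, priority,
 * blocksize and alignmask from the underlying algorithm.
 */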
static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

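/*
 * Propagate the CRYPTO_ALG_INTERNAL bits from the template attributes
 * into *type and *mask; returns true only if the caller asked for an
 * internal algorithm.
 */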
static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
					  u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return false;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;

	if (*type & *mask & CRYPTO_ALG_INTERNAL)
		return true;
	else
		return false;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mcryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int mcryptd_hash_setkey(struct crypto_ahash *parent,
			       const u8 *key, unsigned int keylen)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
			       CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
			       CRYPTO_TFM_RES_MASK);
	return err;
}

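/*
 * Save the caller's completion in the request context, substitute the
 * stage-specific mcryptd completion, and push the request onto the
 * per-cpu queue.
 */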
static int mcryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	int ret;

	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mcryptd_queue *queue =
		mcryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	ret = mcryptd_enqueue_request(queue, &req->base, rctx);

	return ret;
}

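/*
 * The mcryptd_hash_* functions below are the worker-side counterparts
 * of the *_enqueue entry points.  Each runs the corresponding shash
 * operation; when that operation fails (or a backlogged request is
 * signalled with -EINPROGRESS) the original completion is restored and
 * invoked with bottom halves disabled.
 */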
static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_update(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_final(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_finup(req, &rctx->desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* check this again */

	err = shash_ahash_mcryptd_digest(req, desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

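/*
 * Instantiate "mcryptd(<alg>)" around a synchronous shash algorithm:
 * allocate the ahash instance, grab a reference on the underlying
 * shash, and wire up the asynchronous enqueueing entry points.
 */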
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			       struct mcryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	if (!mcryptd_check_internal(tb, &type, &mask))
		return -EINVAL;

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
	inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
				      sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.halg.base.cra_flags = type;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

	inst->alg.init   = mcryptd_hash_init_enqueue;
	inst->alg.update = mcryptd_hash_update_enqueue;
	inst->alg.final  = mcryptd_hash_final_enqueue;
	inst->alg.finup  = mcryptd_hash_finup_enqueue;
	inst->alg.export = mcryptd_hash_export;
	inst->alg.import = mcryptd_hash_import;
	inst->alg.setkey = mcryptd_hash_setkey;
	inst->alg.digest = mcryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_DIGEST:
		return mcryptd_create_hash(tmpl, tb, &mqueue);
	}

	return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
	struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template mcryptd_tmpl = {
	.name = "mcryptd",
	.create = mcryptd_create,
	.free = mcryptd_free,
	.module = THIS_MODULE,
};

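/**
 * mcryptd_alloc_ahash - allocate an mcryptd-wrapped ahash transform
 * @alg_name: name of the underlying algorithm
 * @type: algorithm type flags
 * @mask: algorithm type mask
 *
 * Allocates "mcryptd(alg_name)" and verifies that the resulting
 * transform is really backed by this module.
 */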
struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
					  u32 type, u32 mask)
{
	char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
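/*
 * Illustrative usage sketch only; the algorithm name and flags below
 * are examples, not mandated by this file:
 *
 *	struct mcryptd_ahash *mtfm;
 *
 *	mtfm = mcryptd_alloc_ahash("__intel_sha1-mb", CRYPTO_ALG_INTERNAL,
 *				   CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(mtfm))
 *		return PTR_ERR(mtfm);
 *	...
 *	mcryptd_free_ahash(mtfm);
 */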

int shash_ahash_mcryptd_digest(struct ahash_request *req,
			       struct shash_desc *desc)
{
	int err;

	err = crypto_shash_init(desc) ?:
	      shash_ahash_mcryptd_finup(req, desc);

	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest);

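/*
 * For the update/finup/final helpers below, the data to be hashed is
 * expected to be carried in the ahash request itself; the multi-buffer
 * algorithm pulls it from there, so the shash hooks are called with a
 * NULL buffer and zero length.
 */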
int shash_ahash_mcryptd_update(struct ahash_request *req,
			       struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->update(desc, NULL, 0);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update);

int shash_ahash_mcryptd_finup(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->finup(desc, NULL, 0, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup);

int shash_ahash_mcryptd_final(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->final(desc, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final);

struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct shash_desc *mcryptd_shash_desc(struct ahash_request *req)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(mcryptd_shash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);

static int __init mcryptd_init(void)
{
	int err, cpu;
	struct mcryptd_flush_list *flist;

	mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
	if (!mcryptd_flist)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		INIT_LIST_HEAD(&flist->list);
		mutex_init(&flist->lock);
	}

	err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
	if (err) {
		free_percpu(mcryptd_flist);
		return err;
	}

	err = crypto_register_template(&mcryptd_tmpl);
	if (err) {
		mcryptd_fini_queue(&mqueue);
		free_percpu(mcryptd_flist);
	}

	return err;
}

static void __exit mcryptd_exit(void)
{
	mcryptd_fini_queue(&mqueue);
	crypto_unregister_template(&mcryptd_tmpl);
	free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");