/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

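/*
 * Private state parked aside while an ahash operation runs with a
 * temporarily substituted (aligned) result buffer.  The original
 * completion callback, callback data, result pointer and request flags
 * are saved here by ahash_save_req() and put back by
 * ahash_restore_req(); ubuf supplies the aligned scratch buffer.
 */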
struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
        u32 flags;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
        return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                            halg);
}

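/*
 * Map the page the walk currently points at and return how many bytes
 * may be processed from it in this step.  The step is additionally
 * capped at the next alignmask boundary, so an unaligned head is
 * consumed on its own and subsequent steps start aligned.
 */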
static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        if (walk->flags & CRYPTO_ALG_ASYNC)
                walk->data = kmap(walk->pg);
        else
                walk->data = kmap_atomic(walk->pg);
        walk->data += offset;

        if (offset & alignmask) {
                unsigned int unaligned = alignmask + 1 - (offset & alignmask);

                if (nbytes > unaligned)
                        nbytes = unaligned;
        }

        walk->entrylen -= nbytes;
        return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->offset = sg->offset;
        walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
        walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

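/*
 * Finish one step of a hash walk: unmap the current page and return
 * the next chunk length, 0 once the walk is complete, or err.  The
 * canonical caller loop looks like this (compare shash_ahash_update()
 * in crypto/shash.c):
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *		nbytes = crypto_shash_update(desc, walk.data, nbytes);
 */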
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int nbytes = walk->entrylen;

        walk->data -= walk->offset;

        if (nbytes && walk->offset & alignmask && !err) {
                walk->offset = ALIGN(walk->offset, alignmask + 1);
                nbytes = min(nbytes,
                             ((unsigned int)(PAGE_SIZE)) - walk->offset);
                walk->entrylen -= nbytes;

                if (nbytes) {
                        walk->data += walk->offset;
                        return nbytes;
                }
        }

        if (walk->flags & CRYPTO_ALG_ASYNC)
                kunmap(walk->pg);
        else {
                kunmap_atomic(walk->data);
                /*
                 * The may-sleep test only makes sense for sync users.
                 * Async users don't need to sleep here anyway.
                 */
                crypto_yield(walk->flags);
        }

        if (err)
                return err;

        if (nbytes) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_ahash_walk_first(struct ahash_request *req,
                            struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
        walk->flags |= CRYPTO_ALG_ASYNC;

        BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

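/*
 * ->setkey() with a key that does not satisfy the transform's
 * alignmask: copy the key into a freshly allocated, suitably aligned
 * buffer first, and wipe that copy with kzfree() afterwards so no key
 * material lingers in the heap.
 */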
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                  unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = tfm->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int err;

        if ((unsigned long)key & alignmask)
                err = ahash_setkey_unaligned(tfm, key, keylen);
        else
                err = tfm->setkey(tfm, key, keylen);

        if (err)
                return err;

        crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

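/*
 * Worst-case size of a result bounce buffer: the requested length plus
 * enough padding that a pointer which is merely aligned to the tfm
 * context alignment can always be advanced to the algorithm's
 * alignmask boundary inside the buffer.
 */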
static inline unsigned int ahash_align_buffer_size(unsigned len,
                                                   unsigned long mask)
{
        return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        /*
         * WARNING: Voodoo programming below!
         *
         * The code below is obscure and hard to understand, thus explanation
         * is necessary. See include/crypto/hash.h and include/linux/crypto.h
         * to understand the layout of structures used here!
         *
         * The code here will replace portions of the ORIGINAL request with
         * pointers to new code and buffers so the hashing operation can store
         * the result in an aligned buffer. We will call the modified request
         * an ADJUSTED request.
         *
         * The newly mangled request will look as such:
         *
         * req {
         *	.result        = ADJUSTED[new aligned buffer]
         *	.base.complete = ADJUSTED[pointer to completion function]
         *	.base.data     = ADJUSTED[*req (pointer to self)]
         *	.priv          = ADJUSTED[new priv] {
         *		.result   = ORIGINAL(result)
         *		.complete = ORIGINAL(base.complete)
         *		.data     = ORIGINAL(base.data)
         *	}
         * }
         */

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
        priv->flags = req->base.flags;

        /*
         * WARNING: We do not backup req->priv here! The req->priv
         *          is for internal use of the Crypto API and the
         *          user must _NOT_ _EVER_ depend on its content!
         */

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = cplt;
        req->base.data = req;
        req->priv = priv;

        return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        /* Restore the original crypto request. */
        req->result = priv->result;

        ahash_request_set_callback(req, priv->flags,
                                   priv->complete, priv->data);
        req->priv = NULL;

        /* Free the req->priv.priv from the ADJUSTED request. */
        kzfree(priv);
}

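/*
 * Forward a -EINPROGRESS (backlog) notification to the ORIGINAL
 * completion callback with the ORIGINAL callback data: while the
 * request is adjusted, base.complete/base.data point at our own
 * trampoline rather than at the caller's.
 */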
static void ahash_notify_einprogress(struct ahash_request *req)
{
        struct ahash_request_priv *priv = req->priv;
        struct crypto_async_request oreq;

        oreq.data = priv->data;

        priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        /*
         * Restore the original request, see ahash_op_unaligned() for what
         * goes where.
         *
         * The "struct ahash_request *req" here is in fact the "req.base"
         * from the ADJUSTED request from ahash_op_unaligned(), thus as it
         * is a pointer to self, it is also the ADJUSTED "req".
         */

        /* First copy req->result into req->priv.result */
        ahash_restore_req(areq, err);

        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
                              int (*op)(struct ahash_request *))
{
        int err;

        err = ahash_save_req(req, ahash_op_unaligned_done);
        if (err)
                return err;

        err = op(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

        ahash_restore_req(req, err);

        return err;
}

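/*
 * Dispatch one ahash operation: call the driver directly when the
 * caller's result buffer already satisfies the algorithm's alignmask,
 * otherwise run the operation against a temporary aligned buffer via
 * ahash_op_unaligned() and copy the digest back on completion.
 */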
static int crypto_ahash_op(struct ahash_request *req,
                           int (*op)(struct ahash_request *))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)req->result & alignmask)
                return ahash_op_unaligned(req, op);

        return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                return -ENOKEY;

        return crypto_ahash_op(req, tfm->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS)
                return;

        ahash_restore_req(areq, err);

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        if (err)
                goto out;

        req->base.complete = ahash_def_finup_done2;

        err = crypto_ahash_reqtfm(req)->final(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

out:
        ahash_restore_req(req, err);
        return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        err = ahash_def_finup_finish1(areq, err);
        if (areq->priv)
                return;

        areq->base.complete(&areq->base, err);
}

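/*
 * Default ->finup() for drivers that only implement ->update() and
 * ->final(): run the two steps back to back.  Either step may complete
 * asynchronously, hence the ahash_def_finup_done1()/_finish1()/_done2()
 * chain, which keeps the saved request state alive until the final
 * step has really finished.
 */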
static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int err;

        err = ahash_save_req(req, ahash_def_finup_done1);
        if (err)
                return err;

        err = tfm->update(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

        return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
        return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
        return -ENOSYS;
}

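/*
 * Wire up a new ahash transform.  Algorithms that are really shash
 * implementations (cra_type != &crypto_ahash_type) are handed to the
 * shash-to-ahash adaptor; native ahash algorithms get their ops copied
 * into the tfm, with safe fallbacks for the optional hooks.  A keyed
 * algorithm (unless flagged CRYPTO_ALG_OPTIONAL_KEY) starts out with
 * CRYPTO_TFM_NEED_KEY set, so digests are refused until a key is
 * installed.
 */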
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        hash->setkey = ahash_nosetkey;
        hash->export = ahash_no_export;
        hash->import = ahash_no_import;

        if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
                return crypto_init_shash_ops_async(tfm);

        hash->init = alg->init;
        hash->update = alg->update;
        hash->final = alg->final;
        hash->finup = alg->finup ?: ahash_def_finup;
        hash->digest = alg->digest;

        if (alg->setkey) {
                hash->setkey = alg->setkey;
                if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
                        crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
        }
        if (alg->export)
                hash->export = alg->export;
        if (alg->import)
                hash->import = alg->import;

        return 0;
}

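/*
 * For an shash algorithm wrapped as ahash, the per-tfm context only
 * needs to hold a pointer to the underlying crypto_shash; a native
 * ahash algorithm gets the context size it asked for.
 */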
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type != &crypto_ahash_type)
                return sizeof(struct crypto_shash *);

        return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        strncpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
                    sizeof(struct crypto_report_hash), &rhash))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
        .report = crypto_ahash_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

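/*
 * Allocate an ahash transform by algorithm name.  A minimal caller
 * sketch (error handling elided; "sha256" and a prepared scatterlist
 * sg of nbytes are assumed):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				   NULL, NULL);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);	\* may return -EINPROGRESS *\
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */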
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

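/*
 * Common sanity checks for ahash algorithm registration: the digest
 * and the exported state must each fit in PAGE_SIZE / 8 bytes and the
 * state size must be declared; then the type bits are forced to
 * CRYPTO_ALG_TYPE_AHASH.
 */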
static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;

        if (alg->halg.digestsize > PAGE_SIZE / 8 ||
            alg->halg.statesize > PAGE_SIZE / 8 ||
            alg->halg.statesize == 0)
                return -EINVAL;

        base->cra_type = &crypto_ahash_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
        return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_ahash(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_ahash(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                            struct hash_alg_common *alg,
                            struct crypto_instance *inst)
{
        return crypto_init_spawn2(&spawn->base, &alg->base, inst,
                                  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
        return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
        struct crypto_alg *alg = &halg->base;

        if (alg->cra_type != &crypto_ahash_type)
                return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

        return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");