/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

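/*
 * Usage sketch (illustrative only, not part of this file): a typical caller
 * allocates a transform and a request, attaches a completion callback, and
 * waits for the callback to fire.  The "sha256" algorithm name, the on-stack
 * completion, the 32-byte digest and the caller-owned data/len buffer below
 * are assumptions for the example; error handling and the -EBUSY backlog
 * case are trimmed.
 *
 *	static void example_done(struct crypto_async_request *req, int err)
 *	{
 *		if (err == -EINPROGRESS)
 *			return;
 *		complete(req->data);
 *	}
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	struct scatterlist sg;
 *	u8 digest[32];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   example_done, &done);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	if (crypto_ahash_digest(req) == -EINPROGRESS)
 *		wait_for_completion(&done);
 */
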
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

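/*
 * Sketch of how a driver typically consumes the walk interface above
 * (illustrative; process_chunk() is a hypothetical helper):
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, 0))
 *		process_chunk(walk.data, nbytes);
 *
 * At each step walk.data points at mapped, offset-adjusted page memory.
 */
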
int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

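/*
 * Illustrative example: keyed hashes such as "hmac(sha256)" must be keyed
 * before any digest operation (key and keylen are placeholders):
 *
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 */
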
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

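/*
 * Worst-case slack needed so that a result buffer placed inside a
 * kmalloc()ed allocation can be bumped up to the algorithm's alignment.
 * The allocation is assumed to be crypto_tfm_ctx_alignment()-aligned
 * already, so only the remainder of the mask is added: e.g. (illustrative)
 * an alignmask of 63 with 8-byte ctx alignment adds 63 & ~7 = 56 bytes.
 */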
static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *	.result		= ADJUSTED[new aligned buffer]
	 *	.base.complete	= ADJUSTED[pointer to completion function]
	 *	.base.data	= ADJUSTED[*req (pointer to self)]
	 *	.priv		= ADJUSTED[new priv] {
	 *		.result		= ORIGINAL(result)
	 *		.complete	= ORIGINAL(base.complete)
	 *		.data		= ORIGINAL(base.data)
	 *	}
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	ahash_restore_req(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

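/*
 * Illustrative note: the one-shot digest above is equivalent to running the
 * incremental sequence on the same request (completion plumbing as in the
 * sketch at the top of this file):
 *
 *	crypto_ahash_init(req);
 *	crypto_ahash_update(req);	(once per data chunk)
 *	crypto_ahash_final(req);
 */
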
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	return ahash_def_finup_finish1(req, err);
}

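/*
 * Note (illustrative): ahash_def_finup() above emulates ->finup() for
 * implementations that only provide ->update() and ->final(); conceptually
 * finup(req) == update(req) followed by final(req), with the done1/done2
 * callbacks chaining the two steps when either completes asynchronously.
 */
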
static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->has_setkey = false;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		hash->has_setkey = true;
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");