/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

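/*
 * Private stash used by ahash_save_req()/ahash_restore_req() to hold the
 * caller's original completion callback, callback data, result pointer and
 * request flags while the request is temporarily rewired to use the aligned
 * result buffer (ubuf) allocated at the tail of this structure.
 */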
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

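/*
 * Map the current page of the walk and return how many bytes of it may be
 * hashed in one go: at most the remainder of the page, clipped further so
 * that an unaligned start address is walked only up to the next alignment
 * boundary before full-size chunks are processed.
 */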
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

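/*
 * Advance the walk after the caller has hashed the chunk returned by the
 * previous step.  Returns the size of the next chunk to hash, zero when
 * the walk is complete, or a negative errno.
 */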
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may-sleep check only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
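
/*
 * A minimal sketch of how a driver typically drives crypto_hash_walk_first()
 * and crypto_hash_walk_done(), assuming a hypothetical my_hash_block()
 * helper that hashes one mapped chunk and returns 0 or a negative errno:
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes, err = 0;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, err))
 *		err = my_hash_block(ctx, walk.data, nbytes);
 */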

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

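/*
 * Copy an unaligned key into a freshly allocated buffer that satisfies the
 * algorithm's alignment mask, run ->setkey() on the aligned copy, then wipe
 * and free the buffer so no key material is left behind.
 */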
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

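/*
 * Size needed for a result bounce buffer: the requested length plus the
 * part of the alignment mask not already guaranteed by the allocation's
 * minimum alignment (crypto_tfm_ctx_alignment()).
 */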
static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *	.result        = ADJUSTED[new aligned buffer]
	 *	.base.complete = ADJUSTED[pointer to completion function]
	 *	.base.data     = ADJUSTED[*req (pointer to self)]
	 *	.priv          = ADJUSTED[new priv] {
	 *		.result   = ORIGINAL(result)
	 *		.complete = ORIGINAL(base.complete)
	 *		.data     = ORIGINAL(base.data)
	 *	}
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 * is for internal use of the Crypto API and the
	 * user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

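/*
 * Undo ahash_save_req(): copy the digest out of the aligned bounce buffer
 * into the caller's result buffer (on success), restore the original
 * completion callback and data, and free the private stash.
 */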
static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	ahash_restore_req(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
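
/*
 * A minimal sketch of a one-shot digest from a kernel caller, assuming the
 * chosen implementation completes synchronously (an async one would return
 * -EINPROGRESS and signal the completion callback instead); "sg" is an
 * already-initialized scatterlist and error handling is trimmed:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	u8 digest[32];
 *	int err;
 *
 *	ahash_request_set_callback(req, 0, NULL, NULL);
 *	ahash_request_set_crypt(req, sg, digest, len);
 *	err = crypto_ahash_digest(req);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */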

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

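/*
 * Default ->finup() for algorithms that only provide ->update() and
 * ->final(): run the update, then chain into ->final() through the
 * done1/done2 completion callbacks above so the combination also works
 * asynchronously.
 */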
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

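/*
 * Set up a new ahash transform: install safe defaults for the optional
 * ops, then either wrap a synchronous shash implementation or wire the
 * ops through from the ahash algorithm.
 */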
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->has_setkey = false;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		hash->has_setkey = true;
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");