/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
        u32 flags;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
        return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                            halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        if (walk->flags & CRYPTO_ALG_ASYNC)
                walk->data = kmap(walk->pg);
        else
                walk->data = kmap_atomic(walk->pg);
        walk->data += offset;

        if (offset & alignmask) {
                unsigned int unaligned = alignmask + 1 - (offset & alignmask);

                if (nbytes > unaligned)
                        nbytes = unaligned;
        }

        walk->entrylen -= nbytes;
        return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->offset = sg->offset;
        walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
        walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        unsigned int alignmask = walk->alignmask;

        walk->data -= walk->offset;

        if (walk->entrylen && (walk->offset & alignmask) && !err) {
                unsigned int nbytes;

                walk->offset = ALIGN(walk->offset, alignmask + 1);
                nbytes = min(walk->entrylen,
                             (unsigned int)(PAGE_SIZE - walk->offset));
                if (nbytes) {
                        walk->entrylen -= nbytes;
                        walk->data += walk->offset;
                        return nbytes;
                }
        }

        if (walk->flags & CRYPTO_ALG_ASYNC)
                kunmap(walk->pg);
        else {
                kunmap_atomic(walk->data);
                /*
                 * The may sleep test only makes sense for sync users.
                 * Async users don't need to sleep here anyway.
                 */
                crypto_yield(walk->flags);
        }

        if (err)
                return err;

        if (walk->entrylen) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_ahash_walk_first(struct ahash_request *req,
                            struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
        walk->flags |= CRYPTO_ALG_ASYNC;

        BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

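/*
 * Illustrative sketch (not part of this file): a typical driver-side
 * consumer of the walk API above, mirroring how crypto/shash.c iterates
 * the same walk. crypto_hash_walk_first() maps the first scatterlist
 * entry and returns how many contiguous bytes are available at
 * walk.data; crypto_hash_walk_done() takes the result of processing
 * those bytes, unmaps the page and advances to the next chunk until it
 * returns 0 (done) or a negative error. The my_hash_block() helper is a
 * placeholder for real block processing and is assumed to return 0 or a
 * negative error.
 *
 *      int my_ahash_update(struct ahash_request *req)
 *      {
 *              struct crypto_hash_walk walk;
 *              int nbytes;
 *
 *              for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *                   nbytes = crypto_hash_walk_done(&walk, nbytes))
 *                      nbytes = my_hash_block(req, walk.data, nbytes);
 *
 *              return nbytes;
 *      }
 */
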
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                  unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = tfm->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
        const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

        if (tfm->setkey != ahash_nosetkey &&
            !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
                crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int err;

        if ((unsigned long)key & alignmask)
                err = ahash_setkey_unaligned(tfm, key, keylen);
        else
                err = tfm->setkey(tfm, key, keylen);

        if (unlikely(err)) {
                ahash_set_needkey(tfm);
                return err;
        }

        crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

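/*
 * Illustrative sketch (not part of this file): keyed algorithms such as
 * "hmac(sha256)" keep CRYPTO_TFM_NEED_KEY set until crypto_ahash_setkey()
 * succeeds, so a caller must install a key before issuing requests or
 * crypto_ahash_digest() below fails with -ENOKEY. The key buffer and
 * length here are placeholders.
 *
 *      struct crypto_ahash *tfm;
 *      int err;
 *
 *      tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *
 *      err = crypto_ahash_setkey(tfm, key, keylen);
 *      if (err) {
 *              crypto_free_ahash(tfm);
 *              return err;
 *      }
 */
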
static inline unsigned int ahash_align_buffer_size(unsigned len,
                                                   unsigned long mask)
{
        return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        /*
         * WARNING: Voodoo programming below!
         *
         * The code below is obscure and hard to understand, thus explanation
         * is necessary. See include/crypto/hash.h and include/linux/crypto.h
         * to understand the layout of structures used here!
         *
         * The code here will replace portions of the ORIGINAL request with
         * pointers to new code and buffers so the hashing operation can store
         * the result in an aligned buffer. We will call the modified request
         * an ADJUSTED request.
         *
         * The newly mangled request will look as such:
         *
         * req {
         *   .result        = ADJUSTED[new aligned buffer]
         *   .base.complete = ADJUSTED[pointer to completion function]
         *   .base.data     = ADJUSTED[*req (pointer to self)]
         *   .priv          = ADJUSTED[new priv] {
         *           .result   = ORIGINAL(result)
         *           .complete = ORIGINAL(base.complete)
         *           .data     = ORIGINAL(base.data)
         *   }
         * }
         */

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
        priv->flags = req->base.flags;

        /*
         * WARNING: We do not backup req->priv here! The req->priv
         *          is for internal use of the Crypto API and the
         *          user must _NOT_ _EVER_ depend on its content!
         */

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = cplt;
        req->base.data = req;
        req->priv = priv;

        return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        /* Restore the original crypto request. */
        req->result = priv->result;

        ahash_request_set_callback(req, priv->flags,
                                   priv->complete, priv->data);
        req->priv = NULL;

        /* Free the req->priv.priv from the ADJUSTED request. */
        kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
        struct ahash_request_priv *priv = req->priv;
        struct crypto_async_request oreq;

        oreq.data = priv->data;

        priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        /*
         * Restore the original request, see ahash_op_unaligned() for what
         * goes where.
         *
         * The "struct ahash_request *req" here is in fact the "req.base"
         * from the ADJUSTED request from ahash_op_unaligned(), thus as it
         * is a pointer to self, it is also the ADJUSTED "req".
         */

        /* First copy req->result into req->priv.result */
        ahash_restore_req(areq, err);

        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
                              int (*op)(struct ahash_request *))
{
        int err;

        err = ahash_save_req(req, ahash_op_unaligned_done);
        if (err)
                return err;

        err = op(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

        ahash_restore_req(req, err);

        return err;
}

static int crypto_ahash_op(struct ahash_request *req,
                           int (*op)(struct ahash_request *))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)req->result & alignmask)
                return ahash_op_unaligned(req, op);

        return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                return -ENOKEY;

        return crypto_ahash_op(req, tfm->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

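/*
 * Illustrative sketch (not part of this file): a caller that wants a
 * simple synchronous result from the asynchronous interface exported
 * above can use the crypto_wait helpers to absorb the
 * -EINPROGRESS/-EBUSY completion dance. "sha256", sg, len and the out[]
 * size are placeholders chosen for the example.
 *
 *      u8 out[SHA256_DIGEST_SIZE];
 *      DECLARE_CRYPTO_WAIT(wait);
 *      struct crypto_ahash *tfm;
 *      struct ahash_request *req;
 *      int err;
 *
 *      tfm = crypto_alloc_ahash("sha256", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *
 *      req = ahash_request_alloc(tfm, GFP_KERNEL);
 *      if (!req) {
 *              crypto_free_ahash(tfm);
 *              return -ENOMEM;
 *      }
 *
 *      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                 crypto_req_done, &wait);
 *      ahash_request_set_crypt(req, sg, out, len);
 *
 *      err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *      ahash_request_free(req);
 *      crypto_free_ahash(tfm);
 */
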
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS)
                return;

        ahash_restore_req(areq, err);

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        if (err)
                goto out;

        req->base.complete = ahash_def_finup_done2;

        err = crypto_ahash_reqtfm(req)->final(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

out:
        ahash_restore_req(req, err);
        return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        err = ahash_def_finup_finish1(areq, err);
        if (areq->priv)
                return;

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int err;

        err = ahash_save_req(req, ahash_def_finup_done1);
        if (err)
                return err;

        err = tfm->update(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

        return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
        return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
        return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        hash->setkey = ahash_nosetkey;
        hash->export = ahash_no_export;
        hash->import = ahash_no_import;

        if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
                return crypto_init_shash_ops_async(tfm);

        hash->init = alg->init;
        hash->update = alg->update;
        hash->final = alg->final;
        hash->finup = alg->finup ?: ahash_def_finup;
        hash->digest = alg->digest;

        if (alg->setkey) {
                hash->setkey = alg->setkey;
                ahash_set_needkey(hash);
        }
        if (alg->export)
                hash->export = alg->export;
        if (alg->import)
                hash->import = alg->import;

        return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type != &crypto_ahash_type)
                return sizeof(struct crypto_shash *);

        return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        strncpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
                    sizeof(struct crypto_report_hash), &rhash))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
        .report = crypto_ahash_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;

        if (alg->halg.digestsize > PAGE_SIZE / 8 ||
            alg->halg.statesize > PAGE_SIZE / 8 ||
            alg->halg.statesize == 0)
                return -EINVAL;

        base->cra_type = &crypto_ahash_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

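/*
 * Illustrative sketch (not part of this file): a driver typically fills
 * in a struct ahash_alg and registers it with crypto_register_ahash()
 * from its module init. The callback names, context types and driver
 * name below are placeholders; note that .halg.statesize must be
 * non-zero or ahash_prepare_alg() above rejects the registration.
 *
 *      static struct ahash_alg my_sha256_alg = {
 *              .init   = my_sha256_init,
 *              .update = my_sha256_update,
 *              .final  = my_sha256_final,
 *              .digest = my_sha256_digest,
 *              .halg   = {
 *                      .digestsize = SHA256_DIGEST_SIZE,
 *                      .statesize  = sizeof(struct my_sha256_state),
 *                      .base       = {
 *                              .cra_name        = "sha256",
 *                              .cra_driver_name = "sha256-mydriver",
 *                              .cra_priority    = 300,
 *                              .cra_flags       = CRYPTO_ALG_ASYNC,
 *                              .cra_blocksize   = SHA256_BLOCK_SIZE,
 *                              .cra_ctxsize     = sizeof(struct my_sha256_ctx),
 *                              .cra_module      = THIS_MODULE,
 *                      },
 *              },
 *      };
 *
 *      static int __init my_mod_init(void)
 *      {
 *              return crypto_register_ahash(&my_sha256_alg);
 *      }
 */
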
int crypto_unregister_ahash(struct ahash_alg *alg)
{
        return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_ahash(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_ahash(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                            struct hash_alg_common *alg,
                            struct crypto_instance *inst)
{
        return crypto_init_spawn2(&spawn->base, &alg->base, inst,
                                  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
        return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
        struct crypto_alg *alg = &halg->base;

        if (alg->cra_type != &crypto_ahash_type)
                return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

        return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");