/*
 * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

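/*
 * Carries the completion and final status for a synchronous wait on an
 * asynchronous hash request (see ccp_sync_hash() below).
 */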
struct ccp_sha_result {
	struct completion completion;
	int err;
};

static void ccp_sync_hash_complete(struct crypto_async_request *req, int err)
{
	struct ccp_sha_result *result = req->data;

	if (err == -EINPROGRESS)
		return;

	result->err = err;
	complete(&result->completion);
}

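/*
 * Hash @len bytes of @sg into @buf and wait for the operation to
 * finish, sleeping on the completion if the request was queued.
 */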
static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf,
			 struct scatterlist *sg, unsigned int len)
{
	struct ccp_sha_result result;
	struct ahash_request *req;
	int ret;

	init_completion(&result.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   ccp_sync_hash_complete, &result);
	ahash_request_set_crypt(req, sg, buf, len);

	ret = crypto_ahash_digest(req);
	if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret)
			ret = result.err;
	}

	ahash_request_free(req);

	return ret;
}

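/*
 * Compute the outer HMAC pass: hash the opad block followed by the
 * inner digest (already placed in req->result), so that req->result
 * ends up holding H(K ^ opad || H(K ^ ipad || msg)).
 */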
static int ccp_sha_finish_hmac(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct scatterlist sg[2];
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	sg_init_table(sg, ARRAY_SIZE(sg));
	sg_set_buf(&sg[0], ctx->u.sha.opad, block_size);
	sg_set_buf(&sg[1], req->result, digest_size);

	return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg,
			     block_size + digest_size);
}

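/*
 * Command completion callback: save any bytes that were held back for
 * the next update, copy the digest to the caller's result buffer and,
 * for a keyed final operation, finish the HMAC computation.
 */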
static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		scatterwalk_map_and_copy(rctx->buf, rctx->cmd.u.sha.src,
					 rctx->hash_cnt, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else {
		rctx->buf_count = 0;
	}

	memcpy(req->result, rctx->ctx, digest_size);

	/* If we're doing an HMAC, we need to perform that on the final op */
	if (rctx->final && ctx->u.sha.key_len)
		ret = ccp_sha_finish_hmac(async_req);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}

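/*
 * Common update path for update/final/finup/digest.  Small updates are
 * accumulated in rctx->buf; otherwise the buffered bytes, the optional
 * HMAC ipad block (first operation only) and the new data are chained
 * into one scatterlist table and handed to the CCP as a single command.
 */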
static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
			     unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist *sg;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int len, sg_count;
	gfp_t gfp;
	int ret;

	if (!final && ((nbytes + rctx->buf_count) <= block_size)) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
					 0, nbytes, 0);
		rctx->buf_count += nbytes;

		return 0;
	}

	len = rctx->buf_count + nbytes;

	rctx->final = final;
	rctx->hash_cnt = final ? len : len & ~(block_size - 1);
	rctx->hash_rem = final ? 0 : len & (block_size - 1);
	if (!final && (rctx->hash_cnt == len)) {
		/* CCP can't do zero length final, so keep some data around */
		rctx->hash_cnt -= block_size;
		rctx->hash_rem = block_size;
	}

	/* Initialize the context scatterlist */
	sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));

	/* Build the data scatterlist table - allocate enough entries for all
	 * possible data pieces (hmac ipad, buffer, input data)
	 */
	sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		GFP_KERNEL : GFP_ATOMIC;
	ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
	if (ret)
		return ret;

	sg = NULL;
	if (rctx->first && ctx->u.sha.key_len) {
		rctx->hash_cnt += block_size;

		sg_init_one(&rctx->pad_sg, ctx->u.sha.ipad, block_size);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
	}

	if (rctx->buf_count) {
		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
	}

	if (nbytes)
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);

	if (sg)
		sg_mark_end(sg);

	rctx->msg_bits += (rctx->hash_cnt << 3);	/* Total in bits */

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_SHA;
	rctx->cmd.u.sha.type = rctx->type;
	rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
	rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
	rctx->cmd.u.sha.src = (sg) ? rctx->data_sg.sgl : NULL;
	rctx->cmd.u.sha.src_len = rctx->hash_cnt;
	rctx->cmd.u.sha.final = rctx->final;
	rctx->cmd.u.sha.msg_bits = rctx->msg_bits;

	rctx->first = 0;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

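/*
 * Load the algorithm's initial digest state into the request context
 * and mark the request as the first operation of the hash.
 */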
static int ccp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_crypto_ahash_alg *alg =
		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));

	memset(rctx, 0, sizeof(*rctx));

	memcpy(rctx->ctx, alg->init, sizeof(rctx->ctx));
	rctx->type = alg->type;
	rctx->first = 1;

	return 0;
}

static int ccp_sha_update(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 0);
}

static int ccp_sha_final(struct ahash_request *req)
{
	return ccp_do_sha_update(req, 0, 1);
}

static int ccp_sha_finup(struct ahash_request *req)
{
	return ccp_do_sha_update(req, req->nbytes, 1);
}

static int ccp_sha_digest(struct ahash_request *req)
{
	ccp_sha_init(req);

	return ccp_do_sha_update(req, req->nbytes, 1);
}

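/*
 * Standard HMAC key preparation: keys longer than the block size are
 * first hashed down to the digest size, shorter keys are zero padded,
 * and the ipad/opad blocks are precomputed as key ^ 0x36 / key ^ 0x5c.
 * key_len is cleared first, so the ccp_sync_hash() call below performs
 * a plain (unkeyed) hash of the over-length key.
 */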
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct scatterlist sg;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digest_size = crypto_ahash_digestsize(tfm);
	int i, ret;

	/* Set to zero until complete */
	ctx->u.sha.key_len = 0;

	/* Clear key area to provide zero padding for keys smaller
	 * than the block size
	 */
	memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));

	if (key_len > block_size) {
		/* Must hash the input key */
		sg_init_one(&sg, key, key_len);
		ret = ccp_sync_hash(tfm, ctx->u.sha.key, &sg, key_len);
		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		key_len = digest_size;
	} else {
		memcpy(ctx->u.sha.key, key, key_len);
	}

	for (i = 0; i < block_size; i++) {
		ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36;
		ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c;
	}

	ctx->u.sha.key_len = key_len;

	return 0;
}

static int ccp_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

	ctx->complete = ccp_sha_complete;
	ctx->u.sha.key_len = 0;

	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));

	return 0;
}

static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

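/*
 * The HMAC variant additionally allocates the plain CCP hash named in
 * child_alg; ccp_sha_finish_hmac() uses it to compute the outer pass.
 */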
static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
	struct crypto_ahash *hmac_tfm;

	hmac_tfm = crypto_alloc_ahash(alg->child_alg,
				      CRYPTO_ALG_TYPE_AHASH, 0);
	if (IS_ERR(hmac_tfm)) {
		pr_warn("could not load driver %s needed for HMAC support\n",
			alg->child_alg);
		return PTR_ERR(hmac_tfm);
	}

	ctx->u.sha.hmac_tfm = hmac_tfm;

	return ccp_sha_cra_init(tfm);
}

static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.sha.hmac_tfm)
		crypto_free_ahash(ctx->u.sha.hmac_tfm);

	ccp_sha_cra_exit(tfm);
}

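/*
 * Initial digest values, stored big-endian to match the SHA context
 * handed to the CCP; SHA-1's five state words are zero padded out to
 * the full context size.
 */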
static const __be32 sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4), 0, 0, 0,
};

static const __be32 sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

struct ccp_sha_def {
	const char *name;
	const char *drv_name;
	const __be32 *init;
	enum ccp_sha_type type;
	u32 digest_size;
	u32 block_size;
};

static struct ccp_sha_def sha_algs[] = {
	{
		.name = "sha1",
		.drv_name = "sha1-ccp",
		.init = sha1_init,
		.type = CCP_SHA_TYPE_1,
		.digest_size = SHA1_DIGEST_SIZE,
		.block_size = SHA1_BLOCK_SIZE,
	},
	{
		.name = "sha224",
		.drv_name = "sha224-ccp",
		.init = sha224_init,
		.type = CCP_SHA_TYPE_224,
		.digest_size = SHA224_DIGEST_SIZE,
		.block_size = SHA224_BLOCK_SIZE,
	},
	{
		.name = "sha256",
		.drv_name = "sha256-ccp",
		.init = sha256_init,
		.type = CCP_SHA_TYPE_256,
		.digest_size = SHA256_DIGEST_SIZE,
		.block_size = SHA256_BLOCK_SIZE,
	},
};

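/*
 * Register the hmac(shaN) wrapper for a base algorithm: clone the base
 * registration, record the plain hash name in child_alg, and override
 * setkey and the init/exit hooks with the HMAC variants.
 */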
static int ccp_register_hmac_alg(struct list_head *head,
				 const struct ccp_sha_def *def,
				 const struct ccp_crypto_ahash_alg *base_alg)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	/* Copy the base algorithm and only change what's necessary */
	*ccp_alg = *base_alg;
	INIT_LIST_HEAD(&ccp_alg->entry);

	strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);

	alg = &ccp_alg->alg;
	alg->setkey = ccp_sha_setkey;

	halg = &alg->halg;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
		 def->drv_name);
	base->cra_init = ccp_hmac_sha_cra_init;
	base->cra_exit = ccp_hmac_sha_cra_exit;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return ret;
}

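/*
 * Register one plain SHA algorithm from sha_algs and then its HMAC
 * counterpart, chaining both registrations on @head.
 */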
static int ccp_register_sha_alg(struct list_head *head,
				const struct ccp_sha_def *def)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	ccp_alg->init = def->init;
	ccp_alg->type = def->type;

	alg = &ccp_alg->alg;
	alg->init = ccp_sha_init;
	alg->update = ccp_sha_update;
	alg->final = ccp_sha_final;
	alg->finup = ccp_sha_finup;
	alg->digest = ccp_sha_digest;

	halg = &alg->halg;
	halg->digestsize = def->digest_size;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = def->block_size;
	base->cra_ctxsize = sizeof(struct ccp_ctx);
	base->cra_priority = CCP_CRA_PRIORITY;
	base->cra_type = &crypto_ahash_type;
	base->cra_init = ccp_sha_cra_init;
	base->cra_exit = ccp_sha_cra_exit;
	base->cra_module = THIS_MODULE;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	ret = ccp_register_hmac_alg(head, def, ccp_alg);

	return ret;
}

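/*
 * Register all supported SHA algorithms; entries added to @head allow
 * the caller to unregister them later.
 */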
int ccp_register_sha_algs(struct list_head *head)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
		ret = ccp_register_sha_alg(head, &sha_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}