]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/crypto/caam/caamhash.c
crypto: caam - refactor encryption descriptors generation
[mirror_ubuntu-artful-kernel.git] / drivers / crypto / caam / caamhash.c
CommitLineData
045e3678
YK
1/*
2 * caam - Freescale FSL CAAM support for ahash functions of crypto API
3 *
4 * Copyright 2011 Freescale Semiconductor, Inc.
5 *
6 * Based on caamalg.c crypto API driver.
7 *
8 * relationship of digest job descriptor or first job descriptor after init to
9 * shared descriptors:
10 *
11 * --------------- ---------------
12 * | JobDesc #1 |-------------------->| ShareDesc |
13 * | *(packet 1) | | (hashKey) |
14 * --------------- | (operation) |
15 * ---------------
16 *
17 * relationship of subsequent job descriptors to shared descriptors:
18 *
19 * --------------- ---------------
20 * | JobDesc #2 |-------------------->| ShareDesc |
21 * | *(packet 2) | |------------->| (hashKey) |
22 * --------------- | |-------->| (operation) |
23 * . | | | (load ctx2) |
24 * . | | ---------------
25 * --------------- | |
26 * | JobDesc #3 |------| |
27 * | *(packet 3) | |
28 * --------------- |
29 * . |
30 * . |
31 * --------------- |
32 * | JobDesc #4 |------------
33 * | *(packet 4) |
34 * ---------------
35 *
36 * The SharedDesc never changes for a connection unless rekeyed, but
37 * each packet will likely be in a different place. So all we need
38 * to know to process the packet is where the input is, where the
39 * output goes, and what context we want to process with. Context is
40 * in the SharedDesc, packet references in the JobDesc.
41 *
42 * So, a job desc looks like:
43 *
44 * ---------------------
45 * | Header |
46 * | ShareDesc Pointer |
47 * | SEQ_OUT_PTR |
48 * | (output buffer) |
49 * | (output length) |
50 * | SEQ_IN_PTR |
51 * | (input buffer) |
52 * | (input length) |
53 * ---------------------
54 */
55
56#include "compat.h"
57
58#include "regs.h"
59#include "intern.h"
60#include "desc_constr.h"
61#include "jr.h"
62#include "error.h"
63#include "sg_sw_sec4.h"
64#include "key_gen.h"
65
#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

/* worst-case shared descriptor footprint: longest text + inlined split key */
#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 (message length) */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
96

/* list of hash algorithms registered by this driver */
static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	/*
	 * Pre-built shared descriptors, one per operation type; each is
	 * cacheline-aligned so it can be DMA-mapped independently.
	 */
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	/* DMA addresses of the shared descriptors above */
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	/* job ring device this session is bound to */
	struct device *jrdev;
	/* (split) HMAC key material and its DMA mapping */
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	/* size of the running hash context for this algorithm */
	int ctx_len;
	/* algorithm type/mode and key length info */
	struct alginfo adata;
};
116
/* ahash per-request state (lives in the request context) */
struct caam_hash_state {
	dma_addr_t buf_dma;	/* DMA mapping of the active partial buffer */
	dma_addr_t ctx_dma;	/* DMA mapping of caam_ctx */
	/* double-buffered partial-block storage; current_buf selects one */
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	/* running hash context exchanged with the CAAM engine */
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	/* per-state dispatch: first update differs from subsequent ones */
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;	/* which of buf_0/buf_1 is active */
};
131
/* serialized form of caam_hash_state for ahash export()/import() */
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];	/* active partial buffer */
	u8 caam_ctx[MAX_CTX_LEN];		/* running hash context */
	int buflen;				/* bytes valid in buf */
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
140
045e3678
YK
141/* Common job descriptor seq in/out ptr routines */
142
143/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
ce572085
HG
144static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
145 struct caam_hash_state *state,
146 int ctx_len)
045e3678
YK
147{
148 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
149 ctx_len, DMA_FROM_DEVICE);
ce572085
HG
150 if (dma_mapping_error(jrdev, state->ctx_dma)) {
151 dev_err(jrdev, "unable to map ctx\n");
152 return -ENOMEM;
153 }
154
045e3678 155 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
ce572085
HG
156
157 return 0;
045e3678
YK
158}
159
160/* Map req->result, and append seq_out_ptr command that points to it */
161static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
162 u8 *result, int digestsize)
163{
164 dma_addr_t dst_dma;
165
166 dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
167 append_seq_out_ptr(desc, dst_dma, digestsize, 0);
168
169 return dst_dma;
170}
171
172/* Map current buffer in state and put it in link table */
173static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
174 struct sec4_sg_entry *sec4_sg,
175 u8 *buf, int buflen)
176{
177 dma_addr_t buf_dma;
178
179 buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
180 dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
181
182 return buf_dma;
183}
184
045e3678
YK
185/*
186 * Only put buffer in link table if it contains data, which is possible,
187 * since a buffer has previously been used, and needs to be unmapped,
188 */
189static inline dma_addr_t
190try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
191 u8 *buf, dma_addr_t buf_dma, int buflen,
192 int last_buflen)
193{
194 if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
195 dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
196 if (buflen)
197 buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
198 else
199 buf_dma = 0;
200
201 return buf_dma;
202}
203
204/* Map state->caam_ctx, and add it to link table */
ce572085
HG
205static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
206 struct caam_hash_state *state, int ctx_len,
207 struct sec4_sg_entry *sec4_sg, u32 flag)
045e3678
YK
208{
209 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
ce572085
HG
210 if (dma_mapping_error(jrdev, state->ctx_dma)) {
211 dev_err(jrdev, "unable to map ctx\n");
212 return -ENOMEM;
213 }
214
045e3678 215 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
ce572085
HG
216
217 return 0;
045e3678
YK
218}
219
220/* Common shared descriptor commands */
221static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
222{
db57656b
HG
223 append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
224 ctx->adata.keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
225 KEY_ENC);
045e3678
YK
226}
227
228/* Append key if it has been set */
229static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
230{
231 u32 *key_jump_cmd;
232
61bb86bb 233 init_sh_desc(desc, HDR_SHARE_SERIAL);
045e3678 234
db57656b 235 if (ctx->adata.keylen) {
045e3678
YK
236 /* Skip if already shared */
237 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
238 JUMP_COND_SHRD);
239
240 append_key_ahash(desc, ctx);
241
242 set_jump_tgt_here(desc, key_jump_cmd);
243 }
045e3678
YK
244}
245
246/*
247 * For ahash read data from seqin following state->caam_ctx,
248 * and write resulting class2 context to seqout, which may be state->caam_ctx
249 * or req->result
250 */
251static inline void ahash_append_load_str(u32 *desc, int digestsize)
252{
253 /* Calculate remaining bytes to read */
254 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
255
256 /* Read remaining bytes */
257 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
258 FIFOLD_TYPE_MSG | KEY_VLF);
259
260 /* Store class2 context bytes */
261 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
262 LDST_SRCDST_BYTE_CONTEXT);
263}
264
265/*
266 * For ahash update, final and finup, import context, read and write to seqout
267 */
268static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
269 int digestsize,
270 struct caam_hash_ctx *ctx)
271{
272 init_sh_desc_key_ahash(desc, ctx);
273
274 /* Import context from software */
281669df
HG
275 append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
276 LDST_SRCDST_BYTE_CONTEXT);
045e3678
YK
277
278 /* Class 2 operation */
279 append_operation(desc, op | state | OP_ALG_ENCRYPT);
280
281 /*
282 * Load from buf and/or src and write to req->result or state->context
283 */
284 ahash_append_load_str(desc, digestsize);
285}
286
287/* For ahash firsts and digest, read and write to seqout */
288static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
289 int digestsize, struct caam_hash_ctx *ctx)
290{
291 init_sh_desc_key_ahash(desc, ctx);
292
293 /* Class 2 operation */
294 append_operation(desc, op | state | OP_ALG_ENCRYPT);
295
296 /*
297 * Load from buf and/or src and write to req->result or state->context
298 */
299 ahash_append_load_str(desc, digestsize);
300}
301
302static int ahash_set_sh_desc(struct crypto_ahash *ahash)
303{
304 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
305 int digestsize = crypto_ahash_digestsize(ahash);
306 struct device *jrdev = ctx->jrdev;
307 u32 have_key = 0;
308 u32 *desc;
309
db57656b 310 if (ctx->adata.keylen)
045e3678
YK
311 have_key = OP_ALG_AAI_HMAC_PRECOMP;
312
313 /* ahash_update shared descriptor */
314 desc = ctx->sh_desc_update;
315
61bb86bb 316 init_sh_desc(desc, HDR_SHARE_SERIAL);
045e3678
YK
317
318 /* Import context from software */
281669df
HG
319 append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
320 LDST_SRCDST_BYTE_CONTEXT);
045e3678
YK
321
322 /* Class 2 operation */
db57656b 323 append_operation(desc, ctx->adata.algtype | OP_ALG_AS_UPDATE |
045e3678
YK
324 OP_ALG_ENCRYPT);
325
326 /* Load data and write to result or context */
327 ahash_append_load_str(desc, ctx->ctx_len);
328
329 ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
330 DMA_TO_DEVICE);
331 if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
332 dev_err(jrdev, "unable to map shared descriptor\n");
333 return -ENOMEM;
334 }
335#ifdef DEBUG
514df281
AP
336 print_hex_dump(KERN_ERR,
337 "ahash update shdesc@"__stringify(__LINE__)": ",
045e3678
YK
338 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
339#endif
340
341 /* ahash_update_first shared descriptor */
342 desc = ctx->sh_desc_update_first;
343
db57656b 344 ahash_data_to_out(desc, have_key | ctx->adata.algtype, OP_ALG_AS_INIT,
045e3678
YK
345 ctx->ctx_len, ctx);
346
347 ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
348 desc_bytes(desc),
349 DMA_TO_DEVICE);
350 if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
351 dev_err(jrdev, "unable to map shared descriptor\n");
352 return -ENOMEM;
353 }
354#ifdef DEBUG
514df281
AP
355 print_hex_dump(KERN_ERR,
356 "ahash update first shdesc@"__stringify(__LINE__)": ",
045e3678
YK
357 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
358#endif
359
360 /* ahash_final shared descriptor */
361 desc = ctx->sh_desc_fin;
362
db57656b 363 ahash_ctx_data_to_out(desc, have_key | ctx->adata.algtype,
045e3678
YK
364 OP_ALG_AS_FINALIZE, digestsize, ctx);
365
366 ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
367 DMA_TO_DEVICE);
368 if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
369 dev_err(jrdev, "unable to map shared descriptor\n");
370 return -ENOMEM;
371 }
372#ifdef DEBUG
514df281 373 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
045e3678
YK
374 DUMP_PREFIX_ADDRESS, 16, 4, desc,
375 desc_bytes(desc), 1);
376#endif
377
045e3678
YK
378 /* ahash_digest shared descriptor */
379 desc = ctx->sh_desc_digest;
380
db57656b
HG
381 ahash_data_to_out(desc, have_key | ctx->adata.algtype,
382 OP_ALG_AS_INITFINAL, digestsize, ctx);
045e3678
YK
383
384 ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
385 desc_bytes(desc),
386 DMA_TO_DEVICE);
387 if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
388 dev_err(jrdev, "unable to map shared descriptor\n");
389 return -ENOMEM;
390 }
391#ifdef DEBUG
514df281
AP
392 print_hex_dump(KERN_ERR,
393 "ahash digest shdesc@"__stringify(__LINE__)": ",
045e3678
YK
394 DUMP_PREFIX_ADDRESS, 16, 4, desc,
395 desc_bytes(desc), 1);
396#endif
397
398 return 0;
399}
400
66b3e887 401static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
045e3678
YK
402 u32 keylen)
403{
488ebc3a 404 return gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key_in, keylen);
045e3678
YK
405}
406
407/* Digest hash size if it is too large */
66b3e887 408static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
045e3678
YK
409 u32 *keylen, u8 *key_out, u32 digestsize)
410{
411 struct device *jrdev = ctx->jrdev;
412 u32 *desc;
413 struct split_key_result result;
414 dma_addr_t src_dma, dst_dma;
9e6df0fd 415 int ret;
045e3678 416
9c23b7d3 417 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
2af8f4a2
KP
418 if (!desc) {
419 dev_err(jrdev, "unable to allocate key input memory\n");
420 return -ENOMEM;
421 }
045e3678
YK
422
423 init_job_desc(desc, 0);
424
425 src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
426 DMA_TO_DEVICE);
427 if (dma_mapping_error(jrdev, src_dma)) {
428 dev_err(jrdev, "unable to map key input memory\n");
429 kfree(desc);
430 return -ENOMEM;
431 }
432 dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
433 DMA_FROM_DEVICE);
434 if (dma_mapping_error(jrdev, dst_dma)) {
435 dev_err(jrdev, "unable to map key output memory\n");
436 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
437 kfree(desc);
438 return -ENOMEM;
439 }
440
441 /* Job descriptor to perform unkeyed hash on key_in */
db57656b 442 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
045e3678
YK
443 OP_ALG_AS_INITFINAL);
444 append_seq_in_ptr(desc, src_dma, *keylen, 0);
445 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
446 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
447 append_seq_out_ptr(desc, dst_dma, digestsize, 0);
448 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
449 LDST_SRCDST_BYTE_CONTEXT);
450
451#ifdef DEBUG
514df281 452 print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
045e3678 453 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
514df281 454 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
045e3678
YK
455 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
456#endif
457
458 result.err = 0;
459 init_completion(&result.completion);
460
461 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
462 if (!ret) {
463 /* in progress */
464 wait_for_completion_interruptible(&result.completion);
465 ret = result.err;
466#ifdef DEBUG
514df281
AP
467 print_hex_dump(KERN_ERR,
468 "digested key@"__stringify(__LINE__)": ",
045e3678
YK
469 DUMP_PREFIX_ADDRESS, 16, 4, key_in,
470 digestsize, 1);
471#endif
472 }
045e3678
YK
473 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
474 dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
475
e11aa9f1
HG
476 *keylen = digestsize;
477
045e3678
YK
478 kfree(desc);
479
480 return ret;
481}
482
483static int ahash_setkey(struct crypto_ahash *ahash,
484 const u8 *key, unsigned int keylen)
485{
486 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
487 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
488 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
489 struct device *jrdev = ctx->jrdev;
490 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
491 int digestsize = crypto_ahash_digestsize(ahash);
9e6df0fd 492 int ret;
045e3678
YK
493 u8 *hashed_key = NULL;
494
495#ifdef DEBUG
496 printk(KERN_ERR "keylen %d\n", keylen);
497#endif
498
499 if (keylen > blocksize) {
e7a33c4d
ME
500 hashed_key = kmalloc_array(digestsize,
501 sizeof(*hashed_key),
502 GFP_KERNEL | GFP_DMA);
045e3678
YK
503 if (!hashed_key)
504 return -ENOMEM;
505 ret = hash_digest_key(ctx, key, &keylen, hashed_key,
506 digestsize);
507 if (ret)
d6e7a7d0 508 goto bad_free_key;
045e3678
YK
509 key = hashed_key;
510 }
511
512 /* Pick class 2 key length from algorithm submask */
488ebc3a
HG
513 ctx->adata.keylen = mdpadlen[(ctx->adata.algtype &
514 OP_ALG_ALGSEL_SUBMASK) >>
db57656b
HG
515 OP_ALG_ALGSEL_SHIFT] * 2;
516 ctx->adata.keylen_pad = ALIGN(ctx->adata.keylen, 16);
045e3678
YK
517
518#ifdef DEBUG
519 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
db57656b 520 ctx->adata.keylen, ctx->adata.keylen_pad);
514df281 521 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
045e3678
YK
522 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
523#endif
524
525 ret = gen_split_hash_key(ctx, key, keylen);
526 if (ret)
d6e7a7d0 527 goto bad_free_key;
045e3678 528
db57656b 529 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad,
045e3678
YK
530 DMA_TO_DEVICE);
531 if (dma_mapping_error(jrdev, ctx->key_dma)) {
532 dev_err(jrdev, "unable to map key i/o memory\n");
3d67be27 533 ret = -ENOMEM;
d6e7a7d0 534 goto error_free_key;
045e3678
YK
535 }
536#ifdef DEBUG
514df281 537 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
045e3678 538 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
db57656b 539 ctx->adata.keylen_pad, 1);
045e3678
YK
540#endif
541
542 ret = ahash_set_sh_desc(ahash);
543 if (ret) {
db57656b 544 dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad,
045e3678
YK
545 DMA_TO_DEVICE);
546 }
db57656b 547
d6e7a7d0 548 error_free_key:
045e3678
YK
549 kfree(hashed_key);
550 return ret;
d6e7a7d0 551 bad_free_key:
045e3678
YK
552 kfree(hashed_key);
553 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
554 return -EINVAL;
555}
556
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table (flexible array, sized at allocation time)
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
574
575static inline void ahash_unmap(struct device *dev,
576 struct ahash_edesc *edesc,
577 struct ahash_request *req, int dst_len)
578{
579 if (edesc->src_nents)
13fb8fd7 580 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
045e3678
YK
581 if (edesc->dst_dma)
582 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
583
584 if (edesc->sec4_sg_bytes)
585 dma_unmap_single(dev, edesc->sec4_sg_dma,
586 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
587}
588
589static inline void ahash_unmap_ctx(struct device *dev,
590 struct ahash_edesc *edesc,
591 struct ahash_request *req, int dst_len, u32 flag)
592{
593 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
594 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
595 struct caam_hash_state *state = ahash_request_ctx(req);
596
597 if (state->ctx_dma)
598 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
599 ahash_unmap(dev, edesc, req, dst_len);
600}
601
602static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
603 void *context)
604{
605 struct ahash_request *req = context;
606 struct ahash_edesc *edesc;
607 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
608 int digestsize = crypto_ahash_digestsize(ahash);
609#ifdef DEBUG
610 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
611 struct caam_hash_state *state = ahash_request_ctx(req);
612
613 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
614#endif
615
4ca7c7d8 616 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
fa9659cd
MV
617 if (err)
618 caam_jr_strstatus(jrdev, err);
045e3678
YK
619
620 ahash_unmap(jrdev, edesc, req, digestsize);
621 kfree(edesc);
622
623#ifdef DEBUG
514df281 624 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
045e3678
YK
625 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
626 ctx->ctx_len, 1);
627 if (req->result)
514df281 628 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
045e3678
YK
629 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
630 digestsize, 1);
631#endif
632
633 req->base.complete(&req->base, err);
634}
635
636static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
637 void *context)
638{
639 struct ahash_request *req = context;
640 struct ahash_edesc *edesc;
641 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
642 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
643#ifdef DEBUG
644 struct caam_hash_state *state = ahash_request_ctx(req);
645 int digestsize = crypto_ahash_digestsize(ahash);
646
647 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
648#endif
649
4ca7c7d8 650 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
fa9659cd
MV
651 if (err)
652 caam_jr_strstatus(jrdev, err);
045e3678
YK
653
654 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
655 kfree(edesc);
656
657#ifdef DEBUG
514df281 658 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
045e3678
YK
659 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
660 ctx->ctx_len, 1);
661 if (req->result)
514df281 662 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
045e3678
YK
663 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
664 digestsize, 1);
665#endif
666
667 req->base.complete(&req->base, err);
668}
669
670static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
671 void *context)
672{
673 struct ahash_request *req = context;
674 struct ahash_edesc *edesc;
675 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
676 int digestsize = crypto_ahash_digestsize(ahash);
677#ifdef DEBUG
678 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
679 struct caam_hash_state *state = ahash_request_ctx(req);
680
681 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
682#endif
683
4ca7c7d8 684 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
fa9659cd
MV
685 if (err)
686 caam_jr_strstatus(jrdev, err);
045e3678 687
bc9e05f9 688 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
045e3678
YK
689 kfree(edesc);
690
691#ifdef DEBUG
514df281 692 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
045e3678
YK
693 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
694 ctx->ctx_len, 1);
695 if (req->result)
514df281 696 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
045e3678
YK
697 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
698 digestsize, 1);
699#endif
700
701 req->base.complete(&req->base, err);
702}
703
704static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
705 void *context)
706{
707 struct ahash_request *req = context;
708 struct ahash_edesc *edesc;
709 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
710 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
711#ifdef DEBUG
712 struct caam_hash_state *state = ahash_request_ctx(req);
713 int digestsize = crypto_ahash_digestsize(ahash);
714
715 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
716#endif
717
4ca7c7d8 718 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
fa9659cd
MV
719 if (err)
720 caam_jr_strstatus(jrdev, err);
045e3678 721
ef62b231 722 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
045e3678
YK
723 kfree(edesc);
724
725#ifdef DEBUG
514df281 726 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
045e3678
YK
727 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
728 ctx->ctx_len, 1);
729 if (req->result)
514df281 730 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
045e3678
YK
731 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
732 digestsize, 1);
733#endif
734
735 req->base.complete(&req->base, err);
736}
737
5588d039
RK
738/*
739 * Allocate an enhanced descriptor, which contains the hardware descriptor
740 * and space for hardware scatter table containing sg_num entries.
741 */
742static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
30a43b44
RK
743 int sg_num, u32 *sh_desc,
744 dma_addr_t sh_desc_dma,
745 gfp_t flags)
5588d039
RK
746{
747 struct ahash_edesc *edesc;
748 unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
749
750 edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
751 if (!edesc) {
752 dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
753 return NULL;
754 }
755
30a43b44
RK
756 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
757 HDR_SHARE_DEFER | HDR_REVERSE);
758
5588d039
RK
759 return edesc;
760}
761
65cf164a
RK
762static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
763 struct ahash_edesc *edesc,
764 struct ahash_request *req, int nents,
765 unsigned int first_sg,
766 unsigned int first_bytes, size_t to_hash)
767{
768 dma_addr_t src_dma;
769 u32 options;
770
771 if (nents > 1 || first_sg) {
772 struct sec4_sg_entry *sg = edesc->sec4_sg;
773 unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
774
775 sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
776
777 src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
778 if (dma_mapping_error(ctx->jrdev, src_dma)) {
779 dev_err(ctx->jrdev, "unable to map S/G table\n");
780 return -ENOMEM;
781 }
782
783 edesc->sec4_sg_bytes = sgsize;
784 edesc->sec4_sg_dma = src_dma;
785 options = LDST_SGF;
786 } else {
787 src_dma = sg_dma_address(req->src);
788 options = 0;
789 }
790
791 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
792 options);
793
794 return 0;
795}
796
045e3678
YK
797/* submit update job descriptor */
798static int ahash_update_ctx(struct ahash_request *req)
799{
800 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
801 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
802 struct caam_hash_state *state = ahash_request_ctx(req);
803 struct device *jrdev = ctx->jrdev;
804 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
805 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
806 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
807 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
808 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
809 int *next_buflen = state->current_buf ? &state->buflen_0 :
810 &state->buflen_1, last_buflen;
811 int in_len = *buflen + req->nbytes, to_hash;
30a43b44 812 u32 *desc;
bc13c69e 813 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
045e3678
YK
814 struct ahash_edesc *edesc;
815 int ret = 0;
045e3678
YK
816
817 last_buflen = *next_buflen;
818 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
819 to_hash = in_len - *next_buflen;
820
821 if (to_hash) {
13fb8fd7
LC
822 src_nents = sg_nents_for_len(req->src,
823 req->nbytes - (*next_buflen));
f9970c28
LC
824 if (src_nents < 0) {
825 dev_err(jrdev, "Invalid number of src SG.\n");
826 return src_nents;
827 }
bc13c69e
RK
828
829 if (src_nents) {
830 mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
831 DMA_TO_DEVICE);
832 if (!mapped_nents) {
833 dev_err(jrdev, "unable to DMA map source\n");
834 return -ENOMEM;
835 }
836 } else {
837 mapped_nents = 0;
838 }
839
045e3678 840 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
bc13c69e 841 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
045e3678
YK
842 sizeof(struct sec4_sg_entry);
843
844 /*
845 * allocate space for base edesc and hw desc commands,
846 * link tables
847 */
5588d039 848 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
30a43b44
RK
849 ctx->sh_desc_update,
850 ctx->sh_desc_update_dma, flags);
045e3678 851 if (!edesc) {
bc13c69e 852 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
045e3678
YK
853 return -ENOMEM;
854 }
855
856 edesc->src_nents = src_nents;
857 edesc->sec4_sg_bytes = sec4_sg_bytes;
045e3678 858
ce572085
HG
859 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
860 edesc->sec4_sg, DMA_BIDIRECTIONAL);
861 if (ret)
58b0e5d0 862 goto unmap_ctx;
045e3678
YK
863
864 state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
865 edesc->sec4_sg + 1,
866 buf, state->buf_dma,
c7556ff7 867 *buflen, last_buflen);
045e3678 868
bc13c69e
RK
869 if (mapped_nents) {
870 sg_to_sec4_sg_last(req->src, mapped_nents,
871 edesc->sec4_sg + sec4_sg_src_index,
872 0);
8af7b0f8 873 if (*next_buflen)
307fd543
CS
874 scatterwalk_map_and_copy(next_buf, req->src,
875 to_hash - *buflen,
876 *next_buflen, 0);
045e3678
YK
877 } else {
878 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
261ea058 879 cpu_to_caam32(SEC4_SG_LEN_FIN);
045e3678
YK
880 }
881
8af7b0f8
VM
882 state->current_buf = !state->current_buf;
883
045e3678 884 desc = edesc->hw_desc;
045e3678 885
1da2be33
RG
886 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
887 sec4_sg_bytes,
888 DMA_TO_DEVICE);
ce572085
HG
889 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
890 dev_err(jrdev, "unable to map S/G table\n");
32686d34 891 ret = -ENOMEM;
58b0e5d0 892 goto unmap_ctx;
ce572085 893 }
1da2be33 894
045e3678
YK
895 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
896 to_hash, LDST_SGF);
897
898 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
899
900#ifdef DEBUG
514df281 901 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
045e3678
YK
902 DUMP_PREFIX_ADDRESS, 16, 4, desc,
903 desc_bytes(desc), 1);
904#endif
905
906 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
32686d34 907 if (ret)
58b0e5d0 908 goto unmap_ctx;
32686d34
RK
909
910 ret = -EINPROGRESS;
045e3678 911 } else if (*next_buflen) {
307fd543
CS
912 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
913 req->nbytes, 0);
045e3678
YK
914 *buflen = *next_buflen;
915 *next_buflen = last_buflen;
916 }
917#ifdef DEBUG
514df281 918 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
045e3678 919 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
514df281 920 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
045e3678
YK
921 DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
922 *next_buflen, 1);
923#endif
924
925 return ret;
58b0e5d0 926 unmap_ctx:
32686d34
RK
927 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
928 kfree(edesc);
929 return ret;
045e3678
YK
930}
931
/*
 * ahash_final_ctx - finalize a hash whose running context lives in CAAM
 *
 * Builds a job descriptor whose input is a SEC4 scatter/gather table of
 * [running context][optional buffered partial block], marks the last S/G
 * entry with SEC4_SG_LEN_FIN so the engine knows to pad/finalize, and
 * enqueues it on the job ring.
 *
 * Returns -EINPROGRESS on successful submission (completion is reported
 * asynchronously via ahash_done_ctx_src), or a negative errno on failure.
 */
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	/* the two ping-pong buffers: "current" holds bytes not yet hashed */
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* one S/G entry for the context, one more if there is buffered data */
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	/* first S/G entry: DMA-map the running context for the engine */
	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	/* second S/G entry (if any): the buffered partial block */
	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	/* flag the last entry so the engine finalizes (pads) the hash */
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	/* the digest is DMA'd straight into the caller's result buffer */
	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	/* undo all DMA mappings made above; completion path won't run */
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
1010
/*
 * ahash_finup_ctx - hash remaining request data and finalize, with a
 * running context already held in CAAM
 *
 * Input to the job is [running context][optional buffered bytes][req->src
 * scatterlist]; the shared "fin" descriptor instructs the engine to pad
 * and produce the final digest into req->result.
 *
 * Returns -EINPROGRESS on successful submission (ahash_done_ctx_src runs
 * on completion), or a negative errno on failure.
 */
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	/* req->src may legitimately be empty (zero-length finup) */
	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* context entry plus optional buffered-data entry precede the src */
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	/* append req->src after ctx+buf and wire the SEQ IN pointer */
	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
1100
/*
 * ahash_digest - one-shot hash of the whole request (no running context)
 *
 * Uses the shared "digest" descriptor: all of req->src is fed to the
 * engine in a single job and the digest lands in req->result.
 *
 * Returns -EINPROGRESS on successful submission (ahash_done runs on
 * completion), or a negative errno on failure.
 */
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	/* a single mapped segment needs no S/G table, hence the "> 1" */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
1176
 1177/* submit ahash final if it is the first job descriptor */
/*
 * ahash_final_no_ctx - finalize when no partial hash lives in CAAM yet:
 * all pending data is in the software buffer, so a one-shot digest job
 * over that buffer produces the result.
 *
 * Returns -EINPROGRESS on successful submission (ahash_done runs on
 * completion), or -ENOMEM on any mapping/allocation failure.
 */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	/* input is a single contiguous buffer: no S/G entries needed */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	/* both jumps here are mapping failures, hence the fixed -ENOMEM */
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;

}
1237
 1238/* submit ahash update if it is the first job descriptor after update */
/*
 * ahash_update_no_ctx - first update after an init that went through the
 * software-buffer-only path: hash [buffered bytes][req->src] and have the
 * engine store the resulting running context (DMA_TO_DEVICE overall, ctx
 * written out via SEQ OUT).
 *
 * Bytes that don't fill a whole block are copied into the *other*
 * ping-pong buffer for the next call. On success the state-machine hooks
 * are switched to the *_ctx variants, since a context now exists in CAAM.
 *
 * Returns -EINPROGRESS when a job was submitted, 0 when everything fit in
 * the buffer (nothing to hash yet), or a negative errno on failure.
 */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	/* hash only whole blocks; the remainder is buffered for later */
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/* one extra S/G entry for the buffered partial block */
		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		/* S/G layout: [buffered bytes][req->src segments] */
		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		/* stash the sub-block tail for the next update */
		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		/* swap ping-pong buffers: next_buf becomes current */
		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		/* engine writes the new running context out via SEQ OUT */
		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		/* a context now lives in CAAM: switch to the _ctx hooks */
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		/* request too small to hash: just append to the buffer */
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
1364
 1365/* submit ahash finup if it is the first job descriptor after update */
/*
 * ahash_finup_no_ctx - finup when no running context exists in CAAM:
 * a one-shot digest job over [buffered bytes][req->src] produces the
 * final digest directly into req->result.
 *
 * Returns -EINPROGRESS on successful submission (ahash_done runs on
 * completion), or a negative errno on failure.
 */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	/* first S/G entry: the software-buffered partial block (if any) */
	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		/* NOTE(review): 'unmap' returns -ENOMEM regardless of ret */
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;

}
1457
1458/* submit first update job descriptor after init */
/*
 * ahash_update_first - very first update after init: nothing is buffered
 * yet and no context exists in CAAM. Hashes the whole-block portion of
 * req->src with the "update_first" shared descriptor and has the engine
 * store the new running context; the sub-block tail is buffered.
 *
 * On success the hooks switch to the *_ctx variants; if the request was
 * smaller than one block, the data is only buffered and the hooks switch
 * to the *_no_ctx variants instead.
 *
 * Returns -EINPROGRESS when a job was submitted, 0 when data was merely
 * buffered, or a negative errno on failure.
 */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
		&state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	/* only whole blocks go to the engine; the rest is buffered */
	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		/* single mapped segment needs no S/G table */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->dst_dma = 0;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		/* keep the sub-block tail for the next update */
		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		/* engine stores the new running context via SEQ OUT */
		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		/* context now lives in CAAM: use the _ctx state machine */
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		/* sub-block request: buffer it and stay context-less */
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
1564
/*
 * ahash_finup_first - finup with no prior data buffered or in CAAM:
 * equivalent to a one-shot digest of the whole request.
 */
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
1569
1570static int ahash_init(struct ahash_request *req)
1571{
1572 struct caam_hash_state *state = ahash_request_ctx(req);
1573
1574 state->update = ahash_update_first;
1575 state->finup = ahash_finup_first;
1576 state->final = ahash_final_no_ctx;
1577
1578 state->current_buf = 0;
de0e35ec 1579 state->buf_dma = 0;
6fd4b156
SC
1580 state->buflen_0 = 0;
1581 state->buflen_1 = 0;
045e3678
YK
1582
1583 return 0;
1584}
1585
1586static int ahash_update(struct ahash_request *req)
1587{
1588 struct caam_hash_state *state = ahash_request_ctx(req);
1589
1590 return state->update(req);
1591}
1592
1593static int ahash_finup(struct ahash_request *req)
1594{
1595 struct caam_hash_state *state = ahash_request_ctx(req);
1596
1597 return state->finup(req);
1598}
1599
1600static int ahash_final(struct ahash_request *req)
1601{
1602 struct caam_hash_state *state = ahash_request_ctx(req);
1603
1604 return state->final(req);
1605}
1606
1607static int ahash_export(struct ahash_request *req, void *out)
1608{
045e3678 1609 struct caam_hash_state *state = ahash_request_ctx(req);
5ec90831
RK
1610 struct caam_export_state *export = out;
1611 int len;
1612 u8 *buf;
045e3678 1613
5ec90831
RK
1614 if (state->current_buf) {
1615 buf = state->buf_1;
1616 len = state->buflen_1;
1617 } else {
1618 buf = state->buf_0;
f456cd2d 1619 len = state->buflen_0;
5ec90831
RK
1620 }
1621
1622 memcpy(export->buf, buf, len);
1623 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
1624 export->buflen = len;
1625 export->update = state->update;
1626 export->final = state->final;
1627 export->finup = state->finup;
434b4212 1628
045e3678
YK
1629 return 0;
1630}
1631
1632static int ahash_import(struct ahash_request *req, const void *in)
1633{
045e3678 1634 struct caam_hash_state *state = ahash_request_ctx(req);
5ec90831 1635 const struct caam_export_state *export = in;
045e3678 1636
5ec90831
RK
1637 memset(state, 0, sizeof(*state));
1638 memcpy(state->buf_0, export->buf, export->buflen);
1639 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
1640 state->buflen_0 = export->buflen;
1641 state->update = export->update;
1642 state->final = export->final;
1643 state->finup = export->finup;
434b4212 1644
045e3678
YK
1645 return 0;
1646}
1647
/*
 * caam_hash_template - per-algorithm registration template.
 * Each entry in driver_hash[] describes both the plain hash and its
 * HMAC counterpart; caam_hash_alloc() picks the (name, driver_name)
 * or (hmac_name, hmac_driver_name) pair depending on 'keyed'.
 */
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;	/* OP_ALG_ALGSEL_* selector for the MDHA block */
};
1657
1658/* ahash descriptors */
/*
 * Template table for every hash the driver can register: SHA-1/224/
 * 256/384/512 and MD5, each doubling as its HMAC variant. All entries
 * share the same ahash callbacks; only sizes and algorithm selectors
 * differ.
 */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};
1788
/*
 * caam_hash_alg - a registered algorithm instance; kept on hash_list
 * so caam_algapi_hash_exit() can unregister and free it.
 */
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;	/* OP_ALG_ALGSEL_* selector, copied from template */
	struct ahash_alg ahash_alg;
};
1794
/*
 * caam_hash_cra_init - per-tfm setup: allocate a job ring, record the
 * algorithm selector, derive the running-context length, size the
 * request context, and build the shared descriptors.
 *
 * Returns 0 on success or a negative errno (job-ring allocation or
 * shared-descriptor construction failure).
 */
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	/* walk back from the generic alg to our caam_hash_alg wrapper */
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	/* index runninglen[] by the algorithm-select field */
	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}
1834
/*
 * caam_hash_cra_exit - per-tfm teardown: unmap each shared descriptor
 * that was successfully mapped (non-zero handle and no mapping error),
 * then release the job ring taken in caam_hash_cra_init().
 */
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
1861
/*
 * caam_algapi_hash_exit - module teardown: unregister and free every
 * algorithm previously added to hash_list by the init path.
 */
static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	/* list was never initialized: init path bailed before registering */
	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
1875
/*
 * caam_hash_alloc - build a caam_hash_alg from a template.
 * @template: table entry describing the algorithm
 * @keyed: true to register the HMAC variant (keeps .setkey), false for
 *         the plain hash (clears .setkey)
 *
 * Returns the allocated instance (caller registers and frees it), or
 * ERR_PTR(-ENOMEM).
 */
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		/* unkeyed hashes must not advertise a setkey method */
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}
1920
1921static int __init caam_algapi_hash_init(void)
1922{
35af6403
RG
1923 struct device_node *dev_node;
1924 struct platform_device *pdev;
1925 struct device *ctrldev;
045e3678 1926 int i = 0, err = 0;
bf83490e
VM
1927 struct caam_drv_private *priv;
1928 unsigned int md_limit = SHA512_DIGEST_SIZE;
1929 u32 cha_inst, cha_vid;
045e3678 1930
35af6403
RG
1931 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1932 if (!dev_node) {
1933 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
1934 if (!dev_node)
1935 return -ENODEV;
1936 }
1937
1938 pdev = of_find_device_by_node(dev_node);
1939 if (!pdev) {
1940 of_node_put(dev_node);
1941 return -ENODEV;
1942 }
1943
1944 ctrldev = &pdev->dev;
1945 priv = dev_get_drvdata(ctrldev);
1946 of_node_put(dev_node);
1947
1948 /*
1949 * If priv is NULL, it's probably because the caam driver wasn't
1950 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
1951 */
1952 if (!priv)
1953 return -ENODEV;
1954
bf83490e
VM
1955 /*
1956 * Register crypto algorithms the device supports. First, identify
1957 * presence and attributes of MD block.
1958 */
1959 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
1960 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
1961
1962 /*
1963 * Skip registration of any hashing algorithms if MD block
1964 * is not present.
1965 */
1966 if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
1967 return -ENODEV;
1968
1969 /* Limit digest size based on LP256 */
1970 if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
1971 md_limit = SHA256_DIGEST_SIZE;
1972
cfc6f11b 1973 INIT_LIST_HEAD(&hash_list);
045e3678
YK
1974
1975 /* register crypto algorithms the device supports */
1976 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
045e3678 1977 struct caam_hash_alg *t_alg;
bf83490e
VM
1978 struct caam_hash_template *alg = driver_hash + i;
1979
1980 /* If MD size is not supported by device, skip registration */
1981 if (alg->template_ahash.halg.digestsize > md_limit)
1982 continue;
045e3678 1983
b0e09bae 1984 /* register hmac version */
bf83490e 1985 t_alg = caam_hash_alloc(alg, true);
b0e09bae
YK
1986 if (IS_ERR(t_alg)) {
1987 err = PTR_ERR(t_alg);
bf83490e 1988 pr_warn("%s alg allocation failed\n", alg->driver_name);
b0e09bae
YK
1989 continue;
1990 }
1991
1992 err = crypto_register_ahash(&t_alg->ahash_alg);
1993 if (err) {
6ea30f0a
RK
1994 pr_warn("%s alg registration failed: %d\n",
1995 t_alg->ahash_alg.halg.base.cra_driver_name,
1996 err);
b0e09bae
YK
1997 kfree(t_alg);
1998 } else
cfc6f11b 1999 list_add_tail(&t_alg->entry, &hash_list);
b0e09bae
YK
2000
2001 /* register unkeyed version */
bf83490e 2002 t_alg = caam_hash_alloc(alg, false);
045e3678
YK
2003 if (IS_ERR(t_alg)) {
2004 err = PTR_ERR(t_alg);
bf83490e 2005 pr_warn("%s alg allocation failed\n", alg->driver_name);
045e3678
YK
2006 continue;
2007 }
2008
2009 err = crypto_register_ahash(&t_alg->ahash_alg);
2010 if (err) {
6ea30f0a
RK
2011 pr_warn("%s alg registration failed: %d\n",
2012 t_alg->ahash_alg.halg.base.cra_driver_name,
2013 err);
045e3678
YK
2014 kfree(t_alg);
2015 } else
cfc6f11b 2016 list_add_tail(&t_alg->entry, &hash_list);
045e3678
YK
2017 }
2018
2019 return err;
2020}
2021
/* Module entry/exit hookup and metadata. */
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");