/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #1  |-------------->| ShareDesc   |
 * | *(packet 1) |               | (hashKey)   |
 * ---------------               | (operation) |
 *                               ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #2  |-------------------->| ShareDesc   |
 * | *(packet 2) |      |------------->| (hashKey)   |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
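
/*
 * For reference, the desc_constr.h helpers used throughout this file
 * would express the job descriptor layout above roughly as follows
 * (an illustrative sketch only, not a descriptor built by the driver):
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
 *	append_seq_in_ptr(desc, src_dma, req->nbytes, 0);
 */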

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
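
/*
 * For example, a sha256 transform uses ctx_len = HASH_MSG_LEN +
 * SHA256_DIGEST_SIZE = 40 bytes: the 32-byte running digest plus the
 * 8-byte running message length the hardware needs in order to resume
 * hashing.
 */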

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	struct device *jrdev;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
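
/*
 * buf_0/buf_1 act as a double buffer: current_buf selects the buffer
 * holding bytes carried over from the previous update, while the other
 * one collects the sub-block tail of the current request. Paths that
 * submit a job toggle current_buf, so the buffer just handed to the
 * hardware stays untouched until the job completes.
 */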

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/*
 * Only put the buffer in the link table if it contains data; even when
 * empty, it may have been used by a previous request and still need to
 * be unmapped.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}
}

/*
 * For ahash, read data from seqin following state->caam_ctx, and write
 * the resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup: import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash first update and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
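
/*
 * ahash_set_sh_desc() below builds one shared descriptor per flavor of
 * operation: update_first runs OP_ALG_AS_INIT, update runs
 * OP_ALG_AS_UPDATE, final and finup run OP_ALG_AS_FINALIZE, and the
 * one-shot digest runs OP_ALG_AS_INITFINAL.
 */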
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}

/* Digest the hash key if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto bad_free_key;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto error_free_key;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}
 error_free_key:
	kfree(hashed_key);
	return ret;
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
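
/*
 * The split key generated above is the pair of inner/outer (ipad/opad)
 * hash states that the MDHA derives from the raw HMAC key, which is why
 * split_key_len is twice the running-digest size from mdpadlen[]; with
 * it loaded as KEY_DEST_MDHA_SPLIT, per-request descriptors skip
 * rehashing the padded key.
 */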

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
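
/*
 * sec4_sg[] is a flexible array member: ahash_edesc_alloc() below
 * over-allocates the edesc by sg_num * sizeof(struct sec4_sg_entry), so
 * the hardware link table lives in the same GFP_DMA allocation, right
 * behind the job descriptor.
 */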

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
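
/*
 * The four completion callbacks above differ only in how the hardware
 * context is unmapped: ahash_done() carries no context, ahash_done_bi()
 * unmaps it bidirectionally (update both reads and rewrites it),
 * ahash_done_ctx_src() unmaps a context used as input (final/finup),
 * and ahash_done_ctx_dst() one written as output (first update).
 */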

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for a hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
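
/*
 * Note the single-segment fast path in ahash_edesc_add_src() above:
 * with one mapped segment and nothing prepended, SEQ IN PTR points
 * straight at the data and the link table (plus its extra DMA mapping)
 * is skipped entirely.
 */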

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		desc = edesc->hw_desc;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				cpu_to_caam32(SEC4_SG_LEN_FIN);
		}

		state->current_buf = !state->current_buf;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
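
/*
 * ahash_digest() handles the one-shot case: no caam_ctx is carried, so
 * the INITFINAL shared descriptor consumes req->src in a single pass
 * and writes the digest straight to req->result.
 */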

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
		&state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->dst_dma = 0;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
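
/*
 * The generic entry points below dispatch through function pointers in
 * caam_hash_state: init starts with the no-context handlers, and each
 * path promotes itself to the *_ctx variants once a running context
 * exists in state->caam_ctx.
 */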
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
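
/*
 * ahash_export()/ahash_import() serialize the software-visible state
 * (pending buffer plus hardware context snapshot) into a
 * caam_export_state, so a partial hash can resume on another request;
 * .statesize in the templates below is sized to match. Import always
 * restores into buf_0, with current_buf reset by the memset.
 */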

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
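
/*
 * Each template above is registered twice by caam_algapi_hash_init():
 * once as the keyed hmac(...) variant and once as the plain digest, for
 * which caam_hash_alloc() below clears .setkey.
 */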

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
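	/*
	 * Note: SHA-224 and SHA-384 are truncations of SHA-256 and SHA-512,
	 * so the MDHA running state for them is the full 32 resp. 64 bytes;
	 * the table is indexed by the low nibble of the ALGSEL value
	 * (MD5 = 0 ... SHA-512 = 5), extracted via OP_ALG_ALGSEL_SUBMASK
	 * further down.
	 */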

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

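	/*
	 * Unmap only the shared descriptors that were actually mapped: a
	 * zero or error-valued DMA handle means descriptor setup never got
	 * that far (e.g. cra_init failed partway through).
	 */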
	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}

	return err;
}
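
/*
 * Once loaded on CAAM hardware with an MD block, the registered instances
 * are visible in /proc/crypto under the cra_driver_name values above
 * (e.g. "sha256-caam", "hmac-sha256-caam"), and CAAM_CRA_PRIORITY lets
 * them take precedence over the generic software implementations.
 */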

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");