/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
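
/*
 * Illustrative sketch (not the driver's verbatim flow): a job descriptor
 * of the shape above is assembled with the inline-append helpers used
 * throughout this file, e.g.
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
 *	append_seq_in_ptr(desc, src_dma, req->nbytes, 0);
 */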

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
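
/*
 * Worked example (assuming CAAM_CMD_SZ, the CAAM command word size, is 4
 * bytes): DESC_AHASH_FINAL_LEN = (3 + 5) * 4 = 32 bytes and
 * CAAM_MAX_HASH_KEY_SIZE = 2 * 64 = 128 bytes, giving
 * DESC_HASH_MAX_USED_BYTES = 160 and DESC_HASH_MAX_USED_LEN = 40 command
 * words for each shared descriptor buffer in struct caam_hash_ctx below.
 */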

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif


static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
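
/*
 * Explanatory note: buf_0/buf_1 above form a ping-pong pair selected by
 * current_buf.  The "current" buffer carries the sub-blocksize tail left
 * over from the previous request, the other buffer collects the tail of
 * the present request, and the update paths below flip current_buf once a
 * job has been submitted.
 */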

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/*
 * Unmap the previous buffer mapping, if one exists, then put the current
 * buffer in the link table, but only if it contains data.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/*
 * For ahash update, final and finup (import_ctx = true)
 *     import context, read and write to seqout
 * For ahash first and digest (import_ctx = false)
 *     read and write to seqout
 */
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
				     struct caam_hash_ctx *ctx, bool import_ctx)
{
	u32 op = ctx->adata.algtype;
	u32 *skip_key_load;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Append key if it has been set; ahash update excluded */
	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
		/* Skip key loading if already shared */
		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					    JUMP_COND_SHRD);

		append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
				  ctx->adata.keylen, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);

		set_jump_tgt_here(desc, skip_key_load);

		op |= OP_ALG_AAI_HMAC_PRECOMP;
	}

	/* If needed, import context from software */
	if (import_ctx)
		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
				LDST_SRCDST_BYTE_CONTEXT);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 * Calculate remaining bytes to read
	 */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);
	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

/* Digest the key if it is too long: hash key_in down to digestsize bytes */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
			    CAAM_MAX_HASH_KEY_SIZE);
	if (ret)
		goto bad_free_key;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad, 1);
#endif

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
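
/*
 * Worked example for the keylen > blocksize path above (illustrative
 * numbers, not from the original source): for hmac(sha256) the block size
 * is 64 bytes, so a 100-byte key is first reduced by hash_digest_key() to
 * a 32-byte SHA-256 digest before being handed to gen_split_key().  This
 * mirrors the HMAC (RFC 2104) rule that keys longer than the block size
 * are hashed before use.
 */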

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};
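
/*
 * Explanatory note: sec4_sg[] is a flexible array member, so
 * ahash_edesc_alloc() below sizes the allocation as
 * sizeof(struct ahash_edesc) + sg_num * sizeof(struct sec4_sg_entry),
 * keeping the job descriptor and its link table in one DMA-able buffer.
 */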

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
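
/*
 * Explanatory note on the callback naming above (inferred from the DMA
 * directions used): ahash_done_bi completes jobs whose context was mapped
 * DMA_BIDIRECTIONAL (update), ahash_done_ctx_src completes jobs that read
 * the context in (final/finup), and ahash_done_ctx_dst completes jobs that
 * only write it out (the first update after init).
 */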

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
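
/*
 * Explanatory note: when the source is a single mapped segment and no
 * entry (such as a buffered partial block) has to precede it, the SEQ IN
 * PTR command can point straight at the data; otherwise a sec4 link table
 * is built at edesc->sec4_sg and LDST_SGF flags it to the CAAM.
 */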

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				cpu_to_caam32(SEC4_SG_LEN_FIN);
		}

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
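
/*
 * Worked example for the update path above (illustrative numbers): with a
 * 64-byte block size, 10 bytes already buffered and req->nbytes = 100,
 * in_len = 110, *next_buflen = 110 % 64 = 46 and to_hash = 64, so one full
 * block is hashed now and 46 bytes are carried over in the next buffer.
 */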

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
		&state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->dst_dma = 0;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}
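
/*
 * Explanatory note on the request-state dispatch: ahash_init() starts
 * every request with the *_first handlers; once a job that produces a
 * hardware context has been submitted, ahash_update_first() and
 * ahash_update_no_ctx() rewire state->update/finup/final to the *_ctx
 * variants, so the entry points below simply dispatch through the
 * function pointers.
 */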

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
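
/*
 * Caller-side usage sketch (hedged, not part of this driver): the generic
 * crypto API reaches ahash_export()/ahash_import() above through
 * crypto_ahash_export()/crypto_ahash_import(), e.g.
 *
 *	char hstate[sizeof(struct caam_export_state)];
 *
 *	crypto_ahash_export(req, hstate);
 *	...
 *	crypto_ahash_import(req2, hstate);
 *
 * which is why .statesize is advertised as
 * sizeof(struct caam_export_state) in the templates below.
 */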

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};
1681
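/*
 * Each template above is instantiated twice at init time: once as the
 * keyed "hmac(<alg>)" variant and once as the plain "<alg>" hash; see
 * caam_hash_alloc() below, which picks the matching name pair and
 * clears ->setkey for the unkeyed version.
 */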
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
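	/*
	 * The bare 32 and 64 above are deliberate: MDHA's running state
	 * for SHA-224 is the full 32-byte SHA-256 state, and for SHA-384
	 * the full 64-byte SHA-512 state, so the saved context is wider
	 * than the truncated digest those algorithms finally emit.
	 */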
	dma_addr_t dma_addr;

	/*
	 * Get a job ring from the Job Ring driver to ensure in-order
	 * processing of crypto requests per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

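	/*
	 * All four shared descriptors live back-to-back at the start of
	 * struct caam_hash_ctx, ending right before the sh_desc_update_dma
	 * member, so the offsetof() below doubles as the total length to
	 * map: one mapping covers them all, and each descriptor's bus
	 * address is derived from dma_addr by its own offsetof().
	 * DMA_ATTR_SKIP_CPU_SYNC is presumably safe here because nothing
	 * has been written yet; the descriptors are synced for the device
	 * once ahash_set_sh_desc() constructs them, and
	 * caam_hash_cra_exit() unmaps the same span with matching
	 * attributes.
	 */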
	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_dma),
					DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx,
					sh_desc_update_dma),
			       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	/* hash_list.next is only set once the init routine has run */
	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		/* an unkeyed hash must not advertise a setkey method */
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;
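	/*
	 * On the two register reads above (assuming the usual CAAM layout,
	 * where cha_num_ls and cha_id_ls carry one 4-bit field per
	 * accelerator class): masking with CHA_ID_LS_MD_MASK and shifting
	 * by CHA_ID_LS_MD_SHIFT extracts the MDHA instantiation count and
	 * version, respectively.  An LP256 ("low power") MDHA tops out at
	 * SHA-256, so md_limit drops to SHA256_DIGEST_SIZE and the wider
	 * SHA-384/512 templates are skipped by the digestsize check in
	 * the registration loop below.
	 */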

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");
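
/*
 * A minimal usage sketch (illustrative only; key, keylen, sg, digest
 * and nbytes are placeholders): with this module loaded, the
 * accelerated hashes are reached through the generic crypto API by
 * name, and the driver's high cra_priority makes them preferred over
 * the software implementations:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	struct ahash_request *req;
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, 0, NULL, NULL);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	crypto_ahash_digest(req);    (may complete asynchronously)
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */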