crypto: caam - add support for xcbc(aes)
drivers/crypto/caam/caamhash.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #1  |-------------->|  ShareDesc  |
 * | *(packet 1) |               |  (hashKey)  |
 * ---------------               | (operation) |
 *                               ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #2  |-------------->|  ShareDesc  |
 * | *(packet 2) |      |------->|  (hashKey)  |
 * ---------------      |    |-->| (operation) |
 *        .             |    |   | (load ctx2) |
 *        .             |    |   ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *        .                  |
 *        .                  |
 * ---------------           |
 * | JobDesc #4  |-----------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
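
/*
 * Illustrative sketch only (not part of the driver): a job descriptor of
 * the shape drawn above is built with the desc_constr.h helpers used
 * throughout this file. Assuming desc points at job descriptor storage
 * and src_dma/dst_dma/src_len are already set up and DMA-mapped:
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
 *	append_seq_in_ptr(desc, src_dma, src_len, 0);
 */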

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}
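
/*
 * The two buffers in caam_hash_state form a ping-pong pair: bytes that
 * cannot be hashed yet live in current_buf()/current_buflen(), the
 * leftover of the request being submitted is staged in alt_buf()/
 * alt_buflen(), and switch_buf() swaps the roles once the job completes.
 * A rough sketch of the pattern used by the update paths below:
 *
 *	scatterwalk_map_and_copy(alt_buf(state), req->src, offset, rem, 0);
 *	...
 *	switch_buf(state);	// from the completion callback
 */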

static inline bool is_xcbc_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC);
}
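
/*
 * Example: for the "xcbc(aes)" template registered at the bottom of this
 * file, adata.algtype carries OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC, so
 * is_xcbc_aes() returns true; for the SHA/MD5 templates it returns false.
 * The update paths use this to keep one full trailing block buffered,
 * since XCBC treats the final block specially at FINALIZE time.
 */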

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}
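
/*
 * Unlike map_seq_out_ptr_ctx(), map_seq_out_ptr_result() does not check
 * the mapping itself; every caller below is expected to validate the
 * returned handle with dma_mapping_error() before relying on it.
 */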
199
944c3d4d
HG
200/* Map current buffer in state (if length > 0) and put it in link table */
201static inline int buf_map_to_sec4_sg(struct device *jrdev,
202 struct sec4_sg_entry *sec4_sg,
203 struct caam_hash_state *state)
045e3678 204{
944c3d4d 205 int buflen = *current_buflen(state);
045e3678 206
944c3d4d
HG
207 if (!buflen)
208 return 0;
045e3678 209
944c3d4d
HG
210 state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
211 DMA_TO_DEVICE);
212 if (dma_mapping_error(jrdev, state->buf_dma)) {
213 dev_err(jrdev, "unable to map buf\n");
214 state->buf_dma = 0;
215 return -ENOMEM;
216 }
045e3678 217
944c3d4d
HG
218 dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
219
220 return 0;
045e3678
YK
221}
222
223/* Map state->caam_ctx, and add it to link table */
dfcd8393 224static inline int ctx_map_to_sec4_sg(struct device *jrdev,
ce572085
HG
225 struct caam_hash_state *state, int ctx_len,
226 struct sec4_sg_entry *sec4_sg, u32 flag)
045e3678
YK
227{
228 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
ce572085
HG
229 if (dma_mapping_error(jrdev, state->ctx_dma)) {
230 dev_err(jrdev, "unable to map ctx\n");
87ec02e7 231 state->ctx_dma = 0;
ce572085
HG
232 return -ENOMEM;
233 }
234
045e3678 235 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
ce572085
HG
236
237 return 0;
045e3678
YK
238}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* key is loaded from memory for UPDATE and FINALIZE states */
	ctx->adata.key_dma = ctx->key_dma;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_axcbc(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_axcbc(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_axcbc(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, ctx->key_dma);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_axcbc(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return 0;
}

/* Digest the key if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
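
/*
 * hash_digest_key() is only reached from ahash_setkey() below, for the
 * HMAC case where the key exceeds the block size; per the HMAC
 * construction, such a key is first replaced by its digest before the
 * split (ipad/opad) key is derived.
 */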

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/*
 * Allocate an extended descriptor, which contains the hardware descriptor
 * and space for a hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
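
/*
 * Typical sizing, as used by the request paths below: one link-table
 * entry for the running context, one for the buffered partial block (if
 * any), plus one per mapped source segment, e.g.
 *
 *	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
 *				  ctx->sh_desc_update,
 *				  ctx->sh_desc_update_dma, flags);
 */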

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
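
/*
 * Design note: a single mapped segment with nothing prepended is handed
 * to CAAM directly (options == 0); only multi-entry inputs pay for
 * building and DMA-mapping a sec4 S/G table (LDST_SGF).
 */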

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int blocksize = crypto_ahash_blocksize(ahash);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if (is_xcbc_aes(ctx->adata.algtype) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	int blocksize = crypto_ahash_blocksize(ahash);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if (is_xcbc_aes(ctx->adata.algtype) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if (is_xcbc_aes(ctx->adata.algtype) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
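
/*
 * Nothing has been buffered yet right after init, so finup degenerates
 * to a one-shot digest and the INITFINAL path can be reused as-is.
 */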
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
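
/*
 * export/import serialize only the software-visible state: the live
 * buffer, the running CAAM context and the three dispatch pointers.
 * DMA handles are deliberately not exported; import starts from a
 * zeroed state, so mappings are re-created on the next submitted job.
 */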

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	},
};
1788
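/*
 * Note that the xcbc(aes) entry fills in only the hmac_* names: XCBC-MAC
 * is inherently keyed, so no unkeyed variant exists and the registration
 * loop in caam_algapi_hash_init() skips the second registration for it.
 */
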
struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/*
	 * Sizes for MDHA running digests: MD5, SHA-1, SHA-224, SHA-256,
	 * SHA-384, SHA-512. SHA-224/384 are truncated variants, so their
	 * running state is the full 32/64 bytes of SHA-256/512.
	 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

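	/*
	 * xcbc(aes) runs on the class-1 AES CHA and keeps a 48-byte context
	 * (the 16-byte MAC state plus derived key material). The MDHA
	 * (class-2) hashes keep a running digest plus an 8-byte message
	 * length, looked up in runninglen[] via the low nibble of ALGSEL:
	 * e.g. for SHA-256 (ALGSEL 0x43), the index is 3, giving
	 * 8 + 32 = 40 context bytes.
	 */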
	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;

		ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
						    ARRAY_SIZE(ctx->key),
						    DMA_BIDIRECTIONAL,
						    DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	} else {
		ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

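	/*
	 * The four shared descriptors sit contiguously at the start of
	 * struct caam_hash_ctx, so a single mapping that ends at key[]
	 * covers them all; the per-descriptor DMA addresses are derived
	 * below by adding member offsets. Ending at key[] also keeps the
	 * length identical to the unmap in caam_hash_cra_exit() and leaves
	 * the xcbc key to its own, separate mapping.
	 */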
	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (is_xcbc_aes(caam_hash->alg_type))
			dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
					       ARRAY_SIZE(ctx->key),
					       DMA_BIDIRECTIONAL,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (is_xcbc_aes(ctx->adata.algtype))
		dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
				       ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

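	/*
	 * hash_list is only initialized if module init found a CAAM with an
	 * MD block; a NULL .next means nothing was ever registered.
	 */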
	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
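	/*
	 * Era < 10 parts report the MDHA version and instance count packed
	 * into the global CHA ID / CHA number registers; era >= 10 parts
	 * have a dedicated MDHA version register (vreg.mdha) instead.
	 */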
	if (priv->era < 10) {
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return -ENODEV;

	/*
	 * Limit digest size based on LP256: the low-power MDHA (e.g. in
	 * i.MX6 parts) only supports up to SHA-256, so the digestsize check
	 * below skips the SHA-384/512 templates.
	 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

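		/*
		 * xcbc(aes) has no unkeyed form (its template->name is
		 * empty), so don't register a second, unkeyed variant for
		 * AES-based MACs.
		 */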
		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");