// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}

static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}
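
/*
 * Worked example (illustrative, not part of the driver): with 64-byte
 * blocks, a message of creq->len = 120 bytes has index = 120 % 64 = 56,
 * so padlen = 64 + 56 - 56 = 64; a message of 20 bytes has index = 20
 * and padlen = 56 - 20 = 36. Either way, padlen leaves exactly 8 bytes
 * of room for the 64-bit bit-length word at the end of the final block.
 */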

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}
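
/*
 * Resulting trailer layout (illustrative), the usual MD-style
 * (Merkle-Damgard) padding shared by MD5 and the SHA family: one 0x80
 * byte, then padlen - 1 zero bytes, then the message length in bits as
 * a 64-bit word (little-endian for MD5, big-endian for SHA-1/SHA-256):
 *
 *	| 0x80 | 00 ... 00 | bit length (8 bytes) |
 */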

static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
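
/*
 * Fragment-mode progression, as implied by the transitions above: a
 * request that fits in a single SRAM pass runs as NOT_FRAG; otherwise
 * the first pass runs as FIRST_FRAG, intermediate passes as MID_FRAG,
 * and the pass that consumes the final data plus padding as LAST_FRAG.
 */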

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}

static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
		__le32 *data = NULL;

		/*
		 * The result is already in the correct endianness when the
		 * SA is used.
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = cpu_to_le32(data[i]);

		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * The hardware's MD5 digest is little-endian, but
			 * its SHA digests are big-endian.
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};

static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
	    !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}
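
/*
 * Example (illustrative): with 64-byte hash blocks, three successive
 * 10-byte updates are simply appended to creq->cache and reported as
 * "cached" without touching the engine; an operation is only queued
 * once a full block accumulates or the final request arrives.
 */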

static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);
		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						    CESA_SA_DATA_SRAM_OFFSET +
						    frag_len,
						    ahashdreq->padding_dma,
						    len, CESA_TDMA_DST_IN_SRAM,
						    flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}

static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}
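
/*
 * Summary of the three entry points above: update() only accumulates
 * length and queues partial work, final() marks the request as last
 * with no new data, and finup() does both in a single pass; all three
 * funnel into mv_cesa_ahash_queue_req().
 */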

static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
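
/*
 * Illustrative use through the generic ahash API (a sketch, not part
 * of this driver; req1/req2 are hypothetical requests on the same
 * tfm): export() snapshots the partial digest and cached tail,
 * import() resumes from that snapshot, possibly on another request.
 *
 *	char state[HASH_MAX_STATESIZE];
 *
 *	crypto_ahash_update(req1);		// hash some data
 *	crypto_ahash_export(req1, state);	// snapshot partial state
 *	crypto_ahash_import(req2, state);	// resume elsewhere
 *	crypto_ahash_finup(req2);		// finish the digest
 */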

static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}

static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		memset(keydup, 0, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}
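
/*
 * Background (RFC 2104): HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)),
 * where ipad is the byte 0x36 and opad the byte 0x5c, each repeated over
 * one block. Keys longer than a block are first reduced to H(K), which
 * is what the kmemdup()/digest path above implements.
 */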

static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
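
/*
 * Design note: the two mv_cesa_ahmac_iv_state_init() calls above
 * precompute the partial states H(K ^ ipad) and H(K ^ opad) once at
 * setkey time and export them; per-request HMAC work then resumes from
 * those states instead of rehashing the key-derived pads every time.
 */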

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = be32_to_cpu(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};