/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

#define CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)

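/* Job IDs simply tag queued operations; the atomic counter below is
 * masked with CCP_JOBID_MASK so the value wraps within the range the
 * hardware accepts.
 */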
static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}

static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}

static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}

static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == wa->sg->length) {
		wa->sg = sg_next(wa->sg);
		wa->sg_used = 0;
	}
}

static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}

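/* A DM workarea is a driver-owned bounce buffer. Allocations of up to
 * CCP_DMAPOOL_MAX_SIZE bytes are satisfied from the per-queue DMA pool;
 * anything larger gets a dedicated kzalloc() buffer with its own
 * dma_map_single() mapping, as set up below.
 */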
static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
					     &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

		memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (!wa->dma.address)
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}

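/* The set/get helpers below copy between a scatterlist and the workarea
 * buffer; the final scatterwalk_map_and_copy() argument selects the
 * direction (0 = scatterlist to buffer, 1 = buffer to scatterlist).
 */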
static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
}

static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}

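/* The "reverse" variants additionally mirror the buffer contents in
 * place, converting between the big endian byte order supplied by
 * callers and the little endian order the CCP expects. The end-for-end
 * byte reversal uses the classic three-XOR exchange, which is safe here
 * because p and q never alias while p < q.
 */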
static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   unsigned int wa_offset,
				   struct scatterlist *sg,
				   unsigned int sg_offset,
				   unsigned int len)
{
	u8 *p, *q;

	ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}
	return 0;
}

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    unsigned int wa_offset,
				    struct scatterlist *sg,
				    unsigned int sg_offset,
				    unsigned int len)
{
	u8 *p, *q;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}

	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
}

static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}

static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation. nbytes will always be <= UINT_MAX
	 * because dm_wa->length is an unsigned int.
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}

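/* Carve the next operation out of the source (and optional destination)
 * scatterlists. Whenever an sg entry holds less than one block the data
 * is staged through the bounce buffer instead, and op->soc is set so the
 * queue waits for the operation to complete before the buffer is reused.
 */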
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one source and one destination
	 * address per operation, so we must find the smallest DMA area
	 * common to the source and destination. The resulting len values
	 * will always be <= UINT_MAX because the dma length is an
	 * unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * the destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}

static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}

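/* SB entries are 32-byte (CCP_SB_BYTES) slots in the CCP's local storage.
 * Keys and context values are staged into and out of them with passthru
 * operations, optionally byte-swapped along the way.
 */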
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}

static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}

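/* AES CMAC: the message is run through the AES engine in CMAC mode, and
 * on the final block the caller-supplied subkey (K1 or K2, selected by
 * the caller) is pushed into the context SB so the engine can fold it
 * into the last block.
 */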
static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					aes->cmac_key_len);
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (aes->mode == CCP_AES_MODE_CMAC)
		return ccp_run_aes_cmac_cmd(cmd_q, cmd);

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC) ||
	     (aes->mode == CCP_AES_MODE_CFB)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len != AES_KEYSIZE_128)
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

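	/* The caller supplies the AES key and the tweak key concatenated
	 * in xts->key (two 128-bit halves); the halves are written into
	 * opposite ends of the workarea below so they land in the order
	 * the engine expects.
	 */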
	dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
	ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
	ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;
	unsigned int digest_size;
	int sb_count;
	const void *init;
	u64 block_size;
	int ctx_size;
	int ret;

	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation. However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	default:
		ret = -EINVAL;
		goto e_data;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;
	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

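	/* When sha->opad is supplied the caller is completing an HMAC:
	 * the outer hash H(opad || inner digest) is produced below by
	 * synthesizing a one-shot SHA command over a temporary buffer
	 * and recursing into this same routine.
	 */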
	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}

static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src;
	struct ccp_data dst;
	struct ccp_op op;
	unsigned int sb_count, i_len, o_len;
	int ret;

	if (rsa->key_size > CCP_RSA_MAX_WIDTH)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated. Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256-bits).
	 */
	o_len = ((rsa->key_size + 255) / 256) * 32;
	i_len = o_len * 2;

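	/* For example, a 2048-bit key size yields o_len = 256 bytes and
	 * i_len = 512 bytes, i.e. eight SB entries for the exponent.
	 */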
	sb_count = o_len / CCP_SB_BYTES;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);

	if (!op.sb_key)
		return -EIO;

	/* The RSA exponent may span multiple (32-byte) SB entries and must
	 * be in little endian format. Reverse copy each 32-byte chunk
	 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
	 * and each byte within that chunk and do not perform any byte swap
	 * operations on the passthru operation.
	 */
	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
	if (ret)
		goto e_sb;

	ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
	if (ret)
		goto e_exp;
	ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_exp;
	}

	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
	if (ret)
		goto e_src;
	ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
	if (ret)
		goto e_src;

	/* Prepare the output area for the operation */
	ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
			    o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dm_wa.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

	ret = cmd_q->ccp->vdata->perform->rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len);

e_dst:
	ccp_free_data(&dst, cmd_q);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_sb:
	cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

	return ret;
}

static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret = 0;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_SB_COUNT *
					   CCP_SB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine
	 * Because the CCP engine works on a single source and destination
	 * dma address at a time, each entry in the source scatterlist
	 * (after the dma_map_sg call) must be less than or equal to the
	 * (remaining) length in the destination scatterlist entry and the
	 * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
	 */
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += src.sg_wa.sg->length;
		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}

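/* The "no map" variant is used when the caller has already DMA-mapped the
 * source and destination (CCP_CMD_PASSTHRU_NO_DMA_MAP); the supplied dma
 * addresses are handed to the engine directly, with no sg workarea setup.
 */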
static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
				      struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}

static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
				      ecc->u.mm.operand_1_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
					      ecc->u.mm.operand_2_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
				CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
				      ecc->u.pm.point_1.x_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
				      ecc->u.pm.point_1.y_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
					      ecc->u.pm.point_2.x_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
					      ecc->u.pm.point_2.y_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
					      ecc->u.pm.domain_a_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, 0,
						      ecc->u.pm.scalar, 0,
						      ecc->u.pm.scalar_len);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}

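/* Top-level dispatch: every queued ccp_cmd lands here and is routed to the
 * engine-specific handler above. As a rough usage sketch (hypothetical
 * scatterlists, error handling omitted), a caller of the ccp API would
 * fill in a command along these lines and submit it with ccp_enqueue_cmd():
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_AES;
 *	cmd.u.aes.type = CCP_AES_TYPE_128;
 *	cmd.u.aes.mode = CCP_AES_MODE_CBC;
 *	cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
 *	cmd.u.aes.key = key_sg;		cmd.u.aes.key_len = AES_KEYSIZE_128;
 *	cmd.u.aes.iv = iv_sg;		cmd.u.aes.iv_len = AES_BLOCK_SIZE;
 *	cmd.u.aes.src = src_sg;		cmd.u.aes.src_len = src_len;
 *	cmd.u.aes.dst = dst_sg;
 */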
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		ret = ccp_run_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}