// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
};

static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
};

#define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)

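/* Job ids come from a wrapping atomic counter masked to CCP_JOBID_MASK;
 * only a version 3 device needs a generated id (see CCP_NEW_JOBID above).
 */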
static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}

static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}

static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}

static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == wa->sg->length) {
		wa->sg = sg_next(wa->sg);
		wa->sg_used = 0;
	}
}

static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}

static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

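	/* Small areas come from the per-queue DMA pool; larger ones are
	 * allocated fresh and mapped for streaming DMA.
	 */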
	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
					     &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

		memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (dma_mapping_error(wa->dev, wa->dma.address))
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}

static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			   struct scatterlist *sg, unsigned int sg_offset,
			   unsigned int len)
{
	WARN_ON(!wa->address);

	if (len > (wa->length - wa_offset))
		return -EINVAL;

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
	return 0;
}

static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}

static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   unsigned int wa_offset,
				   struct scatterlist *sg,
				   unsigned int sg_offset,
				   unsigned int len)
{
	u8 *p, *q;
	int rc;

	rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
	if (rc)
		return rc;

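	/* Reverse the bytes in place, swapping the ends pairwise with XOR
	 * (no temporary needed); this converts between the caller's
	 * big-endian data and the engine's little-endian layout.
	 */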
	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}
	return 0;
}

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    unsigned int wa_offset,
				    struct scatterlist *sg,
				    unsigned int sg_offset,
				    unsigned int len)
{
	u8 *p, *q;

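	/* Reverse in place before copying out (mirror of
	 * ccp_reverse_set_dm_area()).
	 */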
	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}

	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
}

static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}

static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 * nbytes will always be <= UINT_MAX because dm_wa->length is
	 * an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}

static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * the destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}

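/* After an operation completes, drain any buffered output back to the
 * destination scatterlist and advance the bookkeeping.
 */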
static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}

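/* Copy a workarea to or from a key storage block (SB) entry with a
 * passthru operation, optionally byte-swapping along the way.
 */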
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}

static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}

static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

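	/* Process the data a block at a time; on the final block of a
	 * terminal CMAC operation the caller-supplied K1/K2 subkey
	 * replaces the context before the last pass.
	 */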
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					      aes->cmac_key_len);
			if (ret)
				goto e_src;
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx, final_wa, tag;
	struct ccp_data src, dst;
	struct ccp_data aad;
	struct ccp_op op;

	__be64 *final;
	unsigned int dm_offset;
	unsigned int ilen;
	bool in_place = true; /* Default value */
	int ret;

	struct scatterlist *p_inp, sg_inp[2];
	struct scatterlist *p_tag, sg_tag[2];
	struct scatterlist *p_outp, sg_outp[2];
	struct scatterlist *p_aad;

	if (!aes->iv)
		return -EINVAL;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (!aes->key) /* Gotta have a key SGL */
		return -EINVAL;

	/* First, decompose the source buffer into AAD & PT,
	 * and the destination buffer into AAD, CT & tag, or
	 * the input into CT & tag.
	 * It is expected that the input and output SGs will
	 * be valid, even if the AAD and input lengths are 0.
	 */
	p_aad = aes->src;
	p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
	p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		ilen = aes->src_len;
		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
	} else {
		/* Input length for decryption includes tag */
		ilen = aes->src_len - AES_BLOCK_SIZE;
		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
	}

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;

	/* Copy the key to the LSB */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* Copy the context (IV) to the LSB.
	 * There is an assumption here that the IV is 96 bits in length, plus
	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	op.init = 1;
	if (aes->aad_len > 0) {
		/* Step 1: Run a GHASH over the Additional Authenticated Data */
		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
				    AES_BLOCK_SIZE,
				    DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		op.u.aes.mode = CCP_AES_MODE_GHASH;
		op.u.aes.action = CCP_AES_GHASHAAD;

		while (aad.sg_wa.bytes_left) {
			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_aad;
			}

			ccp_process_data(&aad, NULL, &op);
			op.init = 0;
		}
	}

	op.u.aes.mode = CCP_AES_MODE_GCTR;
	op.u.aes.action = aes->action;

	if (ilen > 0) {
		/* Step 2: Run a GCTR over the plaintext */
		in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;

		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
				    AES_BLOCK_SIZE,
				    in_place ? DMA_BIDIRECTIONAL
					     : DMA_TO_DEVICE);
		if (ret)
			goto e_aad;

		if (in_place) {
			dst = src;
		} else {
			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
			if (ret)
				goto e_src;
		}

		op.soc = 0;
		op.eom = 0;
		op.init = 1;
		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
			if (!src.sg_wa.bytes_left) {
				unsigned int nbytes = aes->src_len
						      % AES_BLOCK_SIZE;

				if (nbytes) {
					op.eom = 1;
					op.u.aes.size = (nbytes * 8) - 1;
				}
			}

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_dst;
			}

			ccp_process_data(&src, &dst, &op);
			op.init = 0;
		}
	}

	/* Step 3: Update the IV portion of the context with the original IV */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_dst;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* Step 4: Concatenate the lengths of the AAD and source, and
	 * hash that 16 byte buffer.
	 */
	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_dst;
	final = (__be64 *)final_wa.address;
	final[0] = cpu_to_be64(aes->aad_len * 8);
	final[1] = cpu_to_be64(ilen * 8);

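	/* Per the GCM spec, this final GHASH block is len(AAD) || len(C),
	 * each a 64-bit big-endian bit count.
	 */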
	op.u.aes.mode = CCP_AES_MODE_GHASH;
	op.u.aes.action = CCP_AES_GHASHFINAL;
	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = final_wa.dma.address;
	op.src.u.dma.length = AES_BLOCK_SIZE;
	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = final_wa.dma.address;
	op.dst.u.dma.length = AES_BLOCK_SIZE;
	op.eom = 1;
	op.u.aes.size = 0;
	ret = cmd_q->ccp->vdata->perform->aes(&op);
	if (ret)
		goto e_dst;

	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		/* Put the ciphered tag after the ciphertext. */
		ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
	} else {
		/* Does this ciphered tag match the input? */
		ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_tag;
		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
		if (ret) {
			ccp_dm_free(&tag);
			goto e_tag;
		}

		ret = crypto_memneq(tag.address, final_wa.address,
				    AES_BLOCK_SIZE) ? -EBADMSG : 0;
		ccp_dm_free(&tag);
	}

e_tag:
	ccp_dm_free(&final_wa);

e_dst:
	if (aes->src_len && !in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	if (aes->src_len)
		ccp_free_data(&src, cmd_q);

e_aad:
	if (aes->aad_len)
		ccp_free_data(&aad, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (aes->mode == CCP_AES_MODE_CMAC)
		return ccp_run_aes_cmac_cmd(cmd_q, cmd);

	if (aes->mode == CCP_AES_MODE_GCM)
		return ccp_run_aes_gcm_cmd(cmd_q, cmd);

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC) ||
	     (aes->mode == CCP_AES_MODE_CFB)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		if (ret)
			goto e_ctx;
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
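
/* XTS-AES uses two AES keys, one for the data and one for the tweak. The
 * caller supplies both halves concatenated in xts->key; key_len is the
 * length of a single half (e.g. AES_KEYSIZE_128).
 */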
static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	unsigned int sb_count;
	enum ccp_aes_type aestype;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len == AES_KEYSIZE_128)
		aestype = CCP_AES_TYPE_128;
	else if (xts->key_len == AES_KEYSIZE_256)
		aestype = CCP_AES_TYPE_256;
	else
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.type = aestype;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* A version 3 device only supports 128-bit keys, which fits into a
	 * single SB entry. A version 5 device uses a 512-bit vector, so two
	 * SB entries.
	 */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
		sb_count = CCP_XTS_AES_KEY_SB_COUNT;
	else
		sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   sb_count * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
		/* All supported key sizes must be in little endian format.
		 * Use the 256-bit byte swap passthru option to convert from
		 * big endian to little endian.
		 */
		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
		ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	} else {
		/* Version 5 CCPs use a 512-bit space for the key: each portion
		 * occupies 256 bits, or one entire slot, and is zero-padded.
		 */
		unsigned int pad;

		dm_offset = CCP_SB_BYTES;
		pad = dm_offset - xts->key_len;
		ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
				      xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	}
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_des3_engine *des3 = &cmd->u.des3;

	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	unsigned int len_singlekey;
	bool in_place = false;
	int ret;

	/* Error checks */
	if (!cmd_q->ccp->vdata->perform->des3)
		return -EINVAL;

	if (des3->key_len != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	if (((des3->mode == CCP_DES3_MODE_ECB) ||
	     (des3->mode == CCP_DES3_MODE_CBC)) &&
	    (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!des3->key || !des3->src || !des3->dst)
		return -EINVAL;

	if (des3->mode != CCP_DES3_MODE_ECB) {
		if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
			return -EINVAL;

		if (!des3->iv)
			return -EINVAL;
	}

	ret = -EIO;
	/* Zero out all the fields of the command desc */
	memset(&op, 0, sizeof(op));

	/* Set up the Function field */
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;

	op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
	op.u.des3.type = des3->type;
	op.u.des3.mode = des3->mode;
	op.u.des3.action = des3->action;

	/*
	 * All supported key sizes fit in a single (32-byte) KSB entry and
	 * (like AES) must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/*
	 * The contents of the key triplet are in the reverse order of what
	 * is required by the engine. Copy the 3 pieces individually to put
	 * them where they belong.
	 */
	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */

	len_singlekey = des3->key_len / 3;
	ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
			      des3->key, 0, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
			      des3->key, len_singlekey, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset,
			      des3->key, 2 * len_singlekey, len_singlekey);
	if (ret)
		goto e_key;

	/* Copy the key to the SB */
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/*
	 * The DES3 context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	if (des3->mode != CCP_DES3_MODE_ECB) {
		u32 load_mode;

		op.sb_ctx = cmd_q->sb_ctx;

		ret = ccp_init_dm_workarea(&ctx, cmd_q,
					   CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_key;

		/* Load the context into the LSB */
		dm_offset = CCP_SB_BYTES - des3->iv_len;
		ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
				      des3->iv_len);
		if (ret)
			goto e_ctx;

		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
			load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
		else
			load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     load_mode);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}

	/*
	 * Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(des3->src) == sg_virt(des3->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
			    DES3_EDE_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place)
		dst = src;
	else {
		ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
				    DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP DES3 engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the context in ECB mode
			 * we have to wait for the operation to complete
			 * on the last piece of data
			 */
			op.soc = 0;
		}

		ret = cmd_q->ccp->vdata->perform->des3(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (des3->mode != CCP_DES3_MODE_ECB) {
		/* Retrieve the context and make BE */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
			dm_offset = CCP_SB_BYTES - des3->iv_len;
		else
			dm_offset = 0;
		ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
				DES3_EDE_BLOCK_SIZE);
	}
e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	if (des3->mode != CCP_DES3_MODE_ECB)
		ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;
	unsigned int digest_size;
	int sb_count;
	const void *init;
	u64 block_size;
	int ctx_size;
	int ret;

	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_384:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA384_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA384_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA512_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation. However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_384:
		digest_size = SHA384_DIGEST_SIZE;
		init = (void *) ccp_sha384_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ioffset = 0;
		ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		digest_size = SHA512_DIGEST_SIZE;
		init = (void *) ccp_sha512_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ooffset = ioffset = 0;
		break;
	default:
		ret = -EINVAL;
		goto e_data;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	/* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
	 * SHA384/512 require 2 adjacent SB slots, with the right half in the
	 * first slot, and the left half in the second. Each portion must then
	 * be in little endian format: use the 256-bit byte swap option.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;
	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(ctx.address + ctx_size / 2, init,
			       ctx_size / 2);
			memcpy(ctx.address, init + ctx_size / 2,
			       ctx_size / 2);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				      sb_count * CCP_SB_BYTES);
		if (ret)
			goto e_ctx;
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping to BE
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			ccp_get_dm_area(&ctx, 0,
					sha->ctx, LSB_ITEM_SIZE - ooffset,
					LSB_ITEM_SIZE);
			ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
					sha->ctx, 0,
					LSB_ITEM_SIZE - ooffset);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

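		/* Outer HMAC hash: H((K ^ opad) || inner digest). The caller
		 * supplies the already-XORed opad block; append the inner
		 * digest just computed and hash the concatenation.
		 */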
		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(hmac_buf + block_size,
			       ctx.address + LSB_ITEM_SIZE + ooffset,
			       LSB_ITEM_SIZE);
			memcpy(hmac_buf + block_size +
			       (LSB_ITEM_SIZE - ooffset),
			       ctx.address,
			       LSB_ITEM_SIZE);
			break;
		default:
			kfree(hmac_buf);
			ret = -EINVAL;
			goto e_data;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}

static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src, dst;
	struct ccp_op op;
	unsigned int sb_count, i_len, o_len;
	int ret;

	/* Check against the maximum allowable size, in bits */
	if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated. Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256-bits). Compute o_len, i_len in bytes.
	 * Buffer sizes must be a multiple of 32 bytes; rounding up may be
	 * required.
	 */
	o_len = 32 * ((rsa->key_size + 255) / 256);
	i_len = o_len * 2;

	sb_count = 0;
	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
		/* sb_count is the number of storage block slots required
		 * for the modulus.
		 */
		sb_count = o_len / CCP_SB_BYTES;
		op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
								sb_count);
		if (!op.sb_key)
			return -EIO;
	} else {
		/* A version 5 device allows a modulus size that will not fit
		 * in the LSB, so the command will transfer it from memory.
		 * Set the sb key to the default, even though it's not used.
		 */
		op.sb_key = cmd_q->sb_key;
	}

	/* The RSA exponent must be in little endian format. Reverse its
	 * byte order.
	 */
	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
	if (ret)
		goto e_sb;

	ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
	if (ret)
		goto e_exp;

	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
		/* Copy the exponent to the local storage block, using
		 * as many 32-byte blocks as were allocated above. It's
		 * already little endian, so no further change is required.
		 */
		ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_exp;
		}
	} else {
		/* The exponent can be retrieved from memory via DMA. */
		op.exp.u.dma.address = exp.dma.address;
		op.exp.u.dma.offset = 0;
	}

	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
	if (ret)
		goto e_src;
	ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
	if (ret)
		goto e_src;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

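	/* The engine computes dst = src ^ exp mod mod, operating on the
	 * little-endian buffers assembled above.
	 */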
	ret = cmd_q->ccp->vdata->perform->rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_sb:
	if (sb_count)
		cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

	return ret;
}
1931
static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret = 0;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_SB_COUNT *
					   CCP_SB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		if (ret)
			goto e_mask;
		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
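	/* (An in-place request is one where the caller supplied the same
	 * buffer as both source and destination; it is detected below by
	 * comparing the first scatterlist entries.)
	 */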
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine.
	 * Because the CCP engine works on a single source and destination
	 * DMA address at a time, each source scatterlist entry (after the
	 * dma_map_sg call) must fit within the remaining length of the
	 * current destination scatterlist entry, and each length must be
	 * a multiple of CCP_PASSTHRU_BLOCKSIZE.
	 */
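	/* As an illustrative walk-through: with two mapped 64-byte source
	 * entries and a single 128-byte destination entry, the loop below
	 * issues two passthru operations at destination offsets 0 and 64,
	 * setting eom/soc on the final one.
	 */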
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += src.sg_wa.sg->length;
		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}

static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
				      struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;
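		/* Note that nothing is allocated or mapped here: the
		 * workarea fields above simply wrap the caller-supplied,
		 * already-mapped DMA address of the mask.
		 */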

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}

static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
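	/* The source buffer is built as one CCP_ECC_OPERAND_SIZE slot per
	 * value, for example:
	 *   [modulus][operand_1]             for MINV
	 *   [modulus][operand_1][operand_2]  for MMUL/MADD
	 * each value reversed into little endian within its slot.
	 */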
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
				      ecc->u.mm.operand_1_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
					      ecc->u.mm.operand_2_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
				CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
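	/* The source buffer is built as one CCP_ECC_OPERAND_SIZE slot per
	 * value, for example:
	 *   PADD: [modulus][x1][y1][z1 = 1][x2][y2][z2 = 1]
	 *   PMUL: [modulus][x1][y1][z1 = 1][domain a][scalar]
	 *   PDBL: [modulus][x1][y1][z1 = 1][domain a]
	 * each value reversed into little endian within its slot.
	 */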
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinates */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
				      ecc->u.pm.point_1.x_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
				      ecc->u.pm.point_1.y_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1: the workarea buffer
	 * starts zeroed, so writing the low byte yields a little endian
	 * value of 1.
	 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinates */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
					      ecc->u.pm.point_2.x_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
					      ecc->u.pm.point_2.y_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
					      ecc->u.pm.domain_a_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, 0,
						      ecc->u.pm.scalar, 0,
						      ecc->u.pm.scalar_len);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}

int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		ret = ccp_run_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_DES3:
		ret = ccp_run_des3_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
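
/* A minimal usage sketch (illustrative only; field names are taken from
 * the ccp_passthru_engine definition in linux/ccp.h, and error handling
 * is omitted). A caller wanting a plain pass-through copy with no
 * bitwise mask might fill out a command like this:
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_PASSTHRU;
 *	cmd.u.passthru.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
 *	cmd.u.passthru.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
 *	cmd.u.passthru.src = src_sg;
 *	cmd.u.passthru.dst = dst_sg;
 *	cmd.u.passthru.src_len = len;
 *	cmd.u.passthru.final = 1;
 *	ret = ccp_run_cmd(cmd_q, &cmd);
 */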