/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
        cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
        cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
        cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
        cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
        cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
        cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
        cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
        cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
        cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
        cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
        cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
        cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
        cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
        cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
        cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
};

static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
        cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
        cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
        cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
        cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
};

#define CCP_NEW_JOBID(ccp)      ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
                                 ccp_gen_jobid(ccp) : 0)

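/* Generate a fresh job ID for tracking an operation through the queue;
 * only a version 3 device needs one (see CCP_NEW_JOBID above).
 */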
static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
        return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}

static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
        if (wa->dma_count)
                dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);

        wa->dma_count = 0;
}

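/* Wrap a caller's scatterlist in a workarea and DMA-map it for the
 * requested direction.
 */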
static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
                                struct scatterlist *sg, u64 len,
                                enum dma_data_direction dma_dir)
{
        memset(wa, 0, sizeof(*wa));

        wa->sg = sg;
        if (!sg)
                return 0;

        wa->nents = sg_nents_for_len(sg, len);
        if (wa->nents < 0)
                return wa->nents;

        wa->bytes_left = len;
        wa->sg_used = 0;

        if (len == 0)
                return 0;

        if (dma_dir == DMA_NONE)
                return 0;

        wa->dma_sg = sg;
        wa->dma_dev = dev;
        wa->dma_dir = dma_dir;
        wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
        if (!wa->dma_count)
                return -ENOMEM;

        return 0;
}

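/* Advance the workarea cursor by len bytes, stepping to the next
 * scatterlist entry when the current one is exhausted.
 */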
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
        unsigned int nbytes = min_t(u64, len, wa->bytes_left);

        if (!wa->sg)
                return;

        wa->sg_used += nbytes;
        wa->bytes_left -= nbytes;
        if (wa->sg_used == wa->sg->length) {
                wa->sg = sg_next(wa->sg);
                wa->sg_used = 0;
        }
}

static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
        if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
                if (wa->address)
                        dma_pool_free(wa->dma_pool, wa->address,
                                      wa->dma.address);
        } else {
                if (wa->dma.address)
                        dma_unmap_single(wa->dev, wa->dma.address, wa->length,
                                         wa->dma.dir);
                kfree(wa->address);
        }

        wa->address = NULL;
        wa->dma.address = 0;
}

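/* Allocate a DMA-able bounce buffer: small requests come from the
 * per-queue DMA pool, larger ones from the heap with a streaming mapping.
 */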
static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
                                struct ccp_cmd_queue *cmd_q,
                                unsigned int len,
                                enum dma_data_direction dir)
{
        memset(wa, 0, sizeof(*wa));

        if (!len)
                return 0;

        wa->dev = cmd_q->ccp->dev;
        wa->length = len;

        if (len <= CCP_DMAPOOL_MAX_SIZE) {
                wa->dma_pool = cmd_q->dma_pool;

                wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
                                             &wa->dma.address);
                if (!wa->address)
                        return -ENOMEM;

                wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

                memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
        } else {
                wa->address = kzalloc(len, GFP_KERNEL);
                if (!wa->address)
                        return -ENOMEM;

                wa->dma.address = dma_map_single(wa->dev, wa->address, len,
                                                 dir);
                if (dma_mapping_error(wa->dev, wa->dma.address))
                        return -ENOMEM;

                wa->dma.length = len;
        }
        wa->dma.dir = dir;

        return 0;
}

static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
                            struct scatterlist *sg, unsigned int sg_offset,
                            unsigned int len)
{
        WARN_ON(!wa->address);

        scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
                                 0);
}

static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
                            struct scatterlist *sg, unsigned int sg_offset,
                            unsigned int len)
{
        WARN_ON(!wa->address);

        scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
                                 1);
}

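/* Copy between a scatterlist and a workarea while reversing the byte
 * order, converting big-endian caller data (e.g. RSA operands) to the
 * little-endian layout the engine expects, and back again.
 */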
static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
                                   unsigned int wa_offset,
                                   struct scatterlist *sg,
                                   unsigned int sg_offset,
                                   unsigned int len)
{
        u8 *p, *q;

        ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);

        p = wa->address + wa_offset;
        q = p + len - 1;
        while (p < q) {
                *p = *p ^ *q;
                *q = *p ^ *q;
                *p = *p ^ *q;
                p++;
                q--;
        }
        return 0;
}

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
                                    unsigned int wa_offset,
                                    struct scatterlist *sg,
                                    unsigned int sg_offset,
                                    unsigned int len)
{
        u8 *p, *q;

        p = wa->address + wa_offset;
        q = p + len - 1;
        while (p < q) {
                *p = *p ^ *q;
                *q = *p ^ *q;
                *p = *p ^ *q;
                p++;
                q--;
        }

        ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
}

static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
        ccp_dm_free(&data->dm_wa);
        ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
                         struct scatterlist *sg, u64 sg_len,
                         unsigned int dm_len,
                         enum dma_data_direction dir)
{
        int ret;

        memset(data, 0, sizeof(*data));

        ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
                                   dir);
        if (ret)
                goto e_err;

        ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
        if (ret)
                goto e_err;

        return 0;

e_err:
        ccp_free_data(data, cmd_q);

        return ret;
}

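/* Stage data between the caller's scatterlist and the bounce buffer:
 * from == 0 fills the buffer from the scatterlist, from == 1 empties
 * it back out.
 */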
static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
        struct ccp_sg_workarea *sg_wa = &data->sg_wa;
        struct ccp_dm_workarea *dm_wa = &data->dm_wa;
        unsigned int buf_count, nbytes;

        /* Clear the buffer if setting it */
        if (!from)
                memset(dm_wa->address, 0, dm_wa->length);

        if (!sg_wa->sg)
                return 0;

        /* Perform the copy operation
         * nbytes will always be <= UINT_MAX because dm_wa->length is
         * an unsigned int
         */
        nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
        scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
                                 nbytes, from);

        /* Update the structures and generate the count */
        buf_count = 0;
        while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
                nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
                             dm_wa->length - buf_count);
                nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

                buf_count += nbytes;
                ccp_update_sg_workarea(sg_wa, nbytes);
        }

        return buf_count;
}

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
        return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
        return ccp_queue_buf(data, 1);
}

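/* Set up the DMA source (and optional destination) for the next unit of
 * work, bouncing through the workarea buffer whenever a scatterlist
 * element is smaller than the block size.
 */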
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
                             struct ccp_op *op, unsigned int block_size,
                             bool blocksize_op)
{
        unsigned int sg_src_len, sg_dst_len, op_len;

        /* The CCP can only DMA from/to one address each per operation. This
         * requires that we find the smallest DMA area between the source
         * and destination. The resulting len values will always be <= UINT_MAX
         * because the dma length is an unsigned int.
         */
        sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
        sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

        if (dst) {
                sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
                sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
                op_len = min(sg_src_len, sg_dst_len);
        } else {
                op_len = sg_src_len;
        }

        /* The data operation length will be at least block_size in length
         * or the smaller of available sg room remaining for the source or
         * the destination
         */
        op_len = max(op_len, block_size);

        /* Unless we have to buffer data, there's no reason to wait */
        op->soc = 0;

        if (sg_src_len < block_size) {
                /* Not enough data in the sg element, so it
                 * needs to be buffered into a blocksize chunk
                 */
                int cp_len = ccp_fill_queue_buf(src);

                op->soc = 1;
                op->src.u.dma.address = src->dm_wa.dma.address;
                op->src.u.dma.offset = 0;
                op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
        } else {
                /* Enough data in the sg element, but we need to
                 * adjust for any previously copied data
                 */
                op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
                op->src.u.dma.offset = src->sg_wa.sg_used;
                op->src.u.dma.length = op_len & ~(block_size - 1);

                ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
        }

        if (dst) {
                if (sg_dst_len < block_size) {
                        /* Not enough room in the sg element or we're on the
                         * last piece of data (when using padding), so the
                         * output needs to be buffered into a blocksize chunk
                         */
                        op->soc = 1;
                        op->dst.u.dma.address = dst->dm_wa.dma.address;
                        op->dst.u.dma.offset = 0;
                        op->dst.u.dma.length = op->src.u.dma.length;
                } else {
                        /* Enough room in the sg element, but we need to
                         * adjust for any previously used area
                         */
                        op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
                        op->dst.u.dma.offset = dst->sg_wa.sg_used;
                        op->dst.u.dma.length = op->src.u.dma.length;
                }
        }
}

static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
                             struct ccp_op *op)
{
        op->init = 0;

        if (dst) {
                if (op->dst.u.dma.address == dst->dm_wa.dma.address)
                        ccp_empty_queue_buf(dst);
                else
                        ccp_update_sg_workarea(&dst->sg_wa,
                                               op->dst.u.dma.length);
        }
}

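/* Move a workarea's contents into, or out of, a storage block (SB) entry
 * using a pass-through operation, optionally byte-swapping on the way.
 */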
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
                               struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
                               u32 byte_swap, bool from)
{
        struct ccp_op op;

        memset(&op, 0, sizeof(op));

        op.cmd_q = cmd_q;
        op.jobid = jobid;
        op.eom = 1;

        if (from) {
                op.soc = 1;
                op.src.type = CCP_MEMTYPE_SB;
                op.src.u.sb = sb;
                op.dst.type = CCP_MEMTYPE_SYSTEM;
                op.dst.u.dma.address = wa->dma.address;
                op.dst.u.dma.length = wa->length;
        } else {
                op.src.type = CCP_MEMTYPE_SYSTEM;
                op.src.u.dma.address = wa->dma.address;
                op.src.u.dma.length = wa->length;
                op.dst.type = CCP_MEMTYPE_SB;
                op.dst.u.sb = sb;
        }

        op.u.passthru.byte_swap = byte_swap;

        return cmd_q->ccp->vdata->perform->passthru(&op);
}

static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
                          struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
                          u32 byte_swap)
{
        return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
                            struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
                            u32 byte_swap)
{
        return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}

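/* Run an AES-CMAC operation: load the key and IV into storage blocks,
 * feed the message through the AES engine block by block (switching to
 * the caller-supplied K1/K2 subkey for the final block), then read back
 * the resulting MAC.
 */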
static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
                                struct ccp_cmd *cmd)
{
        struct ccp_aes_engine *aes = &cmd->u.aes;
        struct ccp_dm_workarea key, ctx;
        struct ccp_data src;
        struct ccp_op op;
        unsigned int dm_offset;
        int ret;

        if (!((aes->key_len == AES_KEYSIZE_128) ||
              (aes->key_len == AES_KEYSIZE_192) ||
              (aes->key_len == AES_KEYSIZE_256)))
                return -EINVAL;

        if (aes->src_len & (AES_BLOCK_SIZE - 1))
                return -EINVAL;

        if (aes->iv_len != AES_BLOCK_SIZE)
                return -EINVAL;

        if (!aes->key || !aes->iv || !aes->src)
                return -EINVAL;

        if (aes->cmac_final) {
                if (aes->cmac_key_len != AES_BLOCK_SIZE)
                        return -EINVAL;

                if (!aes->cmac_key)
                        return -EINVAL;
        }

        BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
        BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

        ret = -EIO;
        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
        op.sb_key = cmd_q->sb_key;
        op.sb_ctx = cmd_q->sb_ctx;
        op.init = 1;
        op.u.aes.type = aes->type;
        op.u.aes.mode = aes->mode;
        op.u.aes.action = aes->action;

        /* All supported key sizes fit in a single (32-byte) SB entry
         * and must be in little endian format. Use the 256-bit byte
         * swap passthru option to convert from big endian to little
         * endian.
         */
        ret = ccp_init_dm_workarea(&key, cmd_q,
                                   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
                                   DMA_TO_DEVICE);
        if (ret)
                return ret;

        dm_offset = CCP_SB_BYTES - aes->key_len;
        ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
        ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_key;
        }

        /* The AES context fits in a single (32-byte) SB entry and
         * must be in little endian format. Use the 256-bit byte swap
         * passthru option to convert from big endian to little endian.
         */
        ret = ccp_init_dm_workarea(&ctx, cmd_q,
                                   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
                                   DMA_BIDIRECTIONAL);
        if (ret)
                goto e_key;

        dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
        ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
        ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_ctx;
        }

        /* Send data to the CCP AES engine */
        ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
                            AES_BLOCK_SIZE, DMA_TO_DEVICE);
        if (ret)
                goto e_ctx;

        while (src.sg_wa.bytes_left) {
                ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
                if (aes->cmac_final && !src.sg_wa.bytes_left) {
                        op.eom = 1;

                        /* Push the K1/K2 key to the CCP now */
                        ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
                                               op.sb_ctx,
                                               CCP_PASSTHRU_BYTESWAP_256BIT);
                        if (ret) {
                                cmd->engine_error = cmd_q->cmd_error;
                                goto e_src;
                        }

                        ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
                                        aes->cmac_key_len);
                        ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                                             CCP_PASSTHRU_BYTESWAP_256BIT);
                        if (ret) {
                                cmd->engine_error = cmd_q->cmd_error;
                                goto e_src;
                        }
                }

                ret = cmd_q->ccp->vdata->perform->aes(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_src;
                }

                ccp_process_data(&src, NULL, &op);
        }

        /* Retrieve the AES context - convert from LE to BE using
         * 32-byte (256-bit) byteswapping
         */
        ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                               CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_src;
        }

        /* ...but we only need AES_BLOCK_SIZE bytes */
        dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
        ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
        ccp_free_data(&src, cmd_q);

e_ctx:
        ccp_dm_free(&ctx);

e_key:
        ccp_dm_free(&key);

        return ret;
}

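/* Run an AES-GCM operation in hardware passes: GHASH over the AAD, GCTR
 * over the plaintext/ciphertext, then a final GHASH over the encoded AAD
 * and message bit lengths to produce (or verify) the authentication tag.
 */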
static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
                               struct ccp_cmd *cmd)
{
        struct ccp_aes_engine *aes = &cmd->u.aes;
        struct ccp_dm_workarea key, ctx, final_wa, tag;
        struct ccp_data src, dst;
        struct ccp_data aad;
        struct ccp_op op;

        __be64 *final;
        unsigned int dm_offset;
        unsigned int jobid;
        unsigned int ilen;
        bool in_place = true; /* Default value */
        int ret;

        struct scatterlist *p_inp, sg_inp[2];
        struct scatterlist *p_tag, sg_tag[2];
        struct scatterlist *p_outp, sg_outp[2];
        struct scatterlist *p_aad;

        if (!aes->iv)
                return -EINVAL;

        if (!((aes->key_len == AES_KEYSIZE_128) ||
              (aes->key_len == AES_KEYSIZE_192) ||
              (aes->key_len == AES_KEYSIZE_256)))
                return -EINVAL;

        if (!aes->key) /* Gotta have a key SGL */
                return -EINVAL;

        /* First, decompose the source buffer into AAD & PT,
         * and the destination buffer into AAD, CT & tag, or
         * the input into CT & tag.
         * It is expected that the input and output SGs will
         * be valid, even if the AAD and input lengths are 0.
         */
        p_aad = aes->src;
        p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
        p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
        if (aes->action == CCP_AES_ACTION_ENCRYPT) {
                ilen = aes->src_len;
                p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
        } else {
                /* Input length for decryption includes tag */
                ilen = aes->src_len - AES_BLOCK_SIZE;
                p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
        }

        jobid = CCP_NEW_JOBID(cmd_q->ccp);

        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = jobid;
        op.sb_key = cmd_q->sb_key; /* Pre-allocated */
        op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
        op.init = 1;
        op.u.aes.type = aes->type;

        /* Copy the key to the LSB */
        ret = ccp_init_dm_workarea(&key, cmd_q,
                                   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
                                   DMA_TO_DEVICE);
        if (ret)
                return ret;

        dm_offset = CCP_SB_BYTES - aes->key_len;
        ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
        ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_key;
        }

        /* Copy the context (IV) to the LSB.
         * There is an assumption here that the IV is 96 bits in length, plus
         * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
         */
        ret = ccp_init_dm_workarea(&ctx, cmd_q,
                                   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
                                   DMA_BIDIRECTIONAL);
        if (ret)
                goto e_key;

        dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
        ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

        ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_ctx;
        }

        op.init = 1;
        if (aes->aad_len > 0) {
                /* Step 1: Run a GHASH over the Additional Authenticated Data */
                ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
                                    AES_BLOCK_SIZE,
                                    DMA_TO_DEVICE);
                if (ret)
                        goto e_ctx;

                op.u.aes.mode = CCP_AES_MODE_GHASH;
                op.u.aes.action = CCP_AES_GHASHAAD;

                while (aad.sg_wa.bytes_left) {
                        ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);

                        ret = cmd_q->ccp->vdata->perform->aes(&op);
                        if (ret) {
                                cmd->engine_error = cmd_q->cmd_error;
                                goto e_aad;
                        }

                        ccp_process_data(&aad, NULL, &op);
                        op.init = 0;
                }
        }

        op.u.aes.mode = CCP_AES_MODE_GCTR;
        op.u.aes.action = aes->action;

        if (ilen > 0) {
                /* Step 2: Run a GCTR over the plaintext */
                in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;

                ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
                                    AES_BLOCK_SIZE,
                                    in_place ? DMA_BIDIRECTIONAL
                                             : DMA_TO_DEVICE);
                if (ret)
                        goto e_aad;

                if (in_place) {
                        dst = src;
                } else {
                        ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
                                            AES_BLOCK_SIZE, DMA_FROM_DEVICE);
                        if (ret)
                                goto e_src;
                }

                op.soc = 0;
                op.eom = 0;
                op.init = 1;
                while (src.sg_wa.bytes_left) {
                        ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
                        if (!src.sg_wa.bytes_left) {
                                unsigned int nbytes = aes->src_len
                                                      % AES_BLOCK_SIZE;

                                if (nbytes) {
                                        op.eom = 1;
                                        op.u.aes.size = (nbytes * 8) - 1;
                                }
                        }

                        ret = cmd_q->ccp->vdata->perform->aes(&op);
                        if (ret) {
                                cmd->engine_error = cmd_q->cmd_error;
                                goto e_dst;
                        }

                        ccp_process_data(&src, &dst, &op);
                        op.init = 0;
                }
        }

        /* Step 3: Update the IV portion of the context with the original IV */
        ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                               CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_dst;
        }

        ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

        ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_dst;
        }

        /* Step 4: Concatenate the lengths of the AAD and source, and
         * hash that 16 byte buffer.
         */
        ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
                                   DMA_BIDIRECTIONAL);
        if (ret)
                goto e_dst;
        final = (__be64 *)final_wa.address;
        final[0] = cpu_to_be64(aes->aad_len * 8);
        final[1] = cpu_to_be64(ilen * 8);

        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = jobid;
        op.sb_key = cmd_q->sb_key; /* Pre-allocated */
        op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
        op.init = 1;
        op.u.aes.type = aes->type;
        op.u.aes.mode = CCP_AES_MODE_GHASH;
        op.u.aes.action = CCP_AES_GHASHFINAL;
        op.src.type = CCP_MEMTYPE_SYSTEM;
        op.src.u.dma.address = final_wa.dma.address;
        op.src.u.dma.length = AES_BLOCK_SIZE;
        op.dst.type = CCP_MEMTYPE_SYSTEM;
        op.dst.u.dma.address = final_wa.dma.address;
        op.dst.u.dma.length = AES_BLOCK_SIZE;
        op.eom = 1;
        op.u.aes.size = 0;
        ret = cmd_q->ccp->vdata->perform->aes(&op);
        if (ret)
                goto e_tag;

        if (aes->action == CCP_AES_ACTION_ENCRYPT) {
                /* Put the ciphered tag after the ciphertext. */
                ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
        } else {
                /* Does this ciphered tag match the input? */
                ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
                                           DMA_BIDIRECTIONAL);
                if (ret)
                        goto e_tag;
                ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);

                ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
                ccp_dm_free(&tag);
        }

e_tag:
        ccp_dm_free(&final_wa);

e_dst:
        if (ilen > 0 && !in_place)
                ccp_free_data(&dst, cmd_q);

e_src:
        if (ilen > 0)
                ccp_free_data(&src, cmd_q);

e_aad:
        if (aes->aad_len)
                ccp_free_data(&aad, cmd_q);

e_ctx:
        ccp_dm_free(&ctx);

e_key:
        ccp_dm_free(&key);

        return ret;
}

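/* Top-level AES handler: dispatch CMAC and GCM to their own routines,
 * otherwise run a plain ECB/CBC/CFB/CTR operation on the AES engine.
 */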
static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
        struct ccp_aes_engine *aes = &cmd->u.aes;
        struct ccp_dm_workarea key, ctx;
        struct ccp_data src, dst;
        struct ccp_op op;
        unsigned int dm_offset;
        bool in_place = false;
        int ret;

        if (aes->mode == CCP_AES_MODE_CMAC)
                return ccp_run_aes_cmac_cmd(cmd_q, cmd);

        if (aes->mode == CCP_AES_MODE_GCM)
                return ccp_run_aes_gcm_cmd(cmd_q, cmd);

        if (!((aes->key_len == AES_KEYSIZE_128) ||
              (aes->key_len == AES_KEYSIZE_192) ||
              (aes->key_len == AES_KEYSIZE_256)))
                return -EINVAL;

        if (((aes->mode == CCP_AES_MODE_ECB) ||
             (aes->mode == CCP_AES_MODE_CBC) ||
             (aes->mode == CCP_AES_MODE_CFB)) &&
            (aes->src_len & (AES_BLOCK_SIZE - 1)))
                return -EINVAL;

        if (!aes->key || !aes->src || !aes->dst)
                return -EINVAL;

        if (aes->mode != CCP_AES_MODE_ECB) {
                if (aes->iv_len != AES_BLOCK_SIZE)
                        return -EINVAL;

                if (!aes->iv)
                        return -EINVAL;
        }

        BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
        BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

        ret = -EIO;
        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
        op.sb_key = cmd_q->sb_key;
        op.sb_ctx = cmd_q->sb_ctx;
        op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
        op.u.aes.type = aes->type;
        op.u.aes.mode = aes->mode;
        op.u.aes.action = aes->action;

        /* All supported key sizes fit in a single (32-byte) SB entry
         * and must be in little endian format. Use the 256-bit byte
         * swap passthru option to convert from big endian to little
         * endian.
         */
        ret = ccp_init_dm_workarea(&key, cmd_q,
                                   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
                                   DMA_TO_DEVICE);
        if (ret)
                return ret;

        dm_offset = CCP_SB_BYTES - aes->key_len;
        ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
        ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_key;
        }

        /* The AES context fits in a single (32-byte) SB entry and
         * must be in little endian format. Use the 256-bit byte swap
         * passthru option to convert from big endian to little endian.
         */
        ret = ccp_init_dm_workarea(&ctx, cmd_q,
                                   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
                                   DMA_BIDIRECTIONAL);
        if (ret)
                goto e_key;

        if (aes->mode != CCP_AES_MODE_ECB) {
                /* Load the AES context - convert to LE */
                dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
                ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
                ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                                     CCP_PASSTHRU_BYTESWAP_256BIT);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_ctx;
                }
        }
        switch (aes->mode) {
        case CCP_AES_MODE_CFB: /* CFB128 only */
        case CCP_AES_MODE_CTR:
                op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
                break;
        default:
                op.u.aes.size = 0;
        }

        /* Prepare the input and output data workareas. For in-place
         * operations we need to set the dma direction to BIDIRECTIONAL
         * and copy the src workarea to the dst workarea.
         */
        if (sg_virt(aes->src) == sg_virt(aes->dst))
                in_place = true;

        ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
                            AES_BLOCK_SIZE,
                            in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
        if (ret)
                goto e_ctx;

        if (in_place) {
                dst = src;
        } else {
                ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
                                    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
                if (ret)
                        goto e_src;
        }

        /* Send data to the CCP AES engine */
        while (src.sg_wa.bytes_left) {
                ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
                if (!src.sg_wa.bytes_left) {
                        op.eom = 1;

                        /* Since we don't retrieve the AES context in ECB
                         * mode we have to wait for the operation to complete
                         * on the last piece of data
                         */
                        if (aes->mode == CCP_AES_MODE_ECB)
                                op.soc = 1;
                }

                ret = cmd_q->ccp->vdata->perform->aes(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_dst;
                }

                ccp_process_data(&src, &dst, &op);
        }

        if (aes->mode != CCP_AES_MODE_ECB) {
                /* Retrieve the AES context - convert from LE to BE using
                 * 32-byte (256-bit) byteswapping
                 */
                ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                                       CCP_PASSTHRU_BYTESWAP_256BIT);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_dst;
                }

                /* ...but we only need AES_BLOCK_SIZE bytes */
                dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
                ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
        }

e_dst:
        if (!in_place)
                ccp_free_data(&dst, cmd_q);

e_src:
        ccp_free_data(&src, cmd_q);

e_ctx:
        ccp_dm_free(&ctx);

e_key:
        ccp_dm_free(&key);

        return ret;
}

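/* Run an AES-XTS operation. The two halves of the XTS key are loaded into
 * the LSB (the layout differs between v3 and v5 devices) and the tweak
 * travels in the context entry.
 */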
static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
                               struct ccp_cmd *cmd)
{
        struct ccp_xts_aes_engine *xts = &cmd->u.xts;
        struct ccp_dm_workarea key, ctx;
        struct ccp_data src, dst;
        struct ccp_op op;
        unsigned int unit_size, dm_offset;
        bool in_place = false;
        unsigned int sb_count;
        enum ccp_aes_type aestype;
        int ret;

        switch (xts->unit_size) {
        case CCP_XTS_AES_UNIT_SIZE_16:
                unit_size = 16;
                break;
        case CCP_XTS_AES_UNIT_SIZE_512:
                unit_size = 512;
                break;
        case CCP_XTS_AES_UNIT_SIZE_1024:
                unit_size = 1024;
                break;
        case CCP_XTS_AES_UNIT_SIZE_2048:
                unit_size = 2048;
                break;
        case CCP_XTS_AES_UNIT_SIZE_4096:
                unit_size = 4096;
                break;

        default:
                return -EINVAL;
        }

        if (xts->key_len == AES_KEYSIZE_128)
                aestype = CCP_AES_TYPE_128;
        else if (xts->key_len == AES_KEYSIZE_256)
                aestype = CCP_AES_TYPE_256;
        else
                return -EINVAL;

        if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
                return -EINVAL;

        if (xts->iv_len != AES_BLOCK_SIZE)
                return -EINVAL;

        if (!xts->key || !xts->iv || !xts->src || !xts->dst)
                return -EINVAL;

        BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
        BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

        ret = -EIO;
        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
        op.sb_key = cmd_q->sb_key;
        op.sb_ctx = cmd_q->sb_ctx;
        op.init = 1;
        op.u.xts.type = aestype;
        op.u.xts.action = xts->action;
        op.u.xts.unit_size = xts->unit_size;

        /* A version 3 device only supports 128-bit keys, which fits into a
         * single SB entry. A version 5 device uses a 512-bit vector, so two
         * SB entries.
         */
        if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
                sb_count = CCP_XTS_AES_KEY_SB_COUNT;
        else
                sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
        ret = ccp_init_dm_workarea(&key, cmd_q,
                                   sb_count * CCP_SB_BYTES,
                                   DMA_TO_DEVICE);
        if (ret)
                return ret;

        if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
                /* All supported key sizes must be in little endian format.
                 * Use the 256-bit byte swap passthru option to convert from
                 * big endian to little endian.
                 */
                dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
                ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
                ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
        } else {
                /* Version 5 CCPs use a 512-bit space for the key: each portion
                 * occupies 256 bits, or one entire slot, and is zero-padded.
                 */
                unsigned int pad;

                dm_offset = CCP_SB_BYTES;
                pad = dm_offset - xts->key_len;
                ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
                ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
                                xts->key_len);
        }
        ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_key;
        }

        /* The AES context fits in a single (32-byte) SB entry and
         * for XTS is already in little endian format so no byte swapping
         * is needed.
         */
        ret = ccp_init_dm_workarea(&ctx, cmd_q,
                                   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
                                   DMA_BIDIRECTIONAL);
        if (ret)
                goto e_key;

        ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
        ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                             CCP_PASSTHRU_BYTESWAP_NOOP);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_ctx;
        }

        /* Prepare the input and output data workareas. For in-place
         * operations we need to set the dma direction to BIDIRECTIONAL
         * and copy the src workarea to the dst workarea.
         */
        if (sg_virt(xts->src) == sg_virt(xts->dst))
                in_place = true;

        ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
                            unit_size,
                            in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
        if (ret)
                goto e_ctx;

        if (in_place) {
                dst = src;
        } else {
                ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
                                    unit_size, DMA_FROM_DEVICE);
                if (ret)
                        goto e_src;
        }

        /* Send data to the CCP AES engine */
        while (src.sg_wa.bytes_left) {
                ccp_prepare_data(&src, &dst, &op, unit_size, true);
                if (!src.sg_wa.bytes_left)
                        op.eom = 1;

                ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_dst;
                }

                ccp_process_data(&src, &dst, &op);
        }

        /* Retrieve the AES context - convert from LE to BE using
         * 32-byte (256-bit) byteswapping
         */
        ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                               CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_dst;
        }

        /* ...but we only need AES_BLOCK_SIZE bytes */
        dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
        ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
        if (!in_place)
                ccp_free_data(&dst, cmd_q);

e_src:
        ccp_free_data(&src, cmd_q);

e_ctx:
        ccp_dm_free(&ctx);

e_key:
        ccp_dm_free(&key);

        return ret;
}

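/* Run a 3DES (EDE) operation. The three single-DES keys arrive in the
 * opposite order from what the engine expects, so they are copied into
 * the workarea piecewise before being loaded into the LSB.
 */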
static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
        struct ccp_des3_engine *des3 = &cmd->u.des3;

        struct ccp_dm_workarea key, ctx;
        struct ccp_data src, dst;
        struct ccp_op op;
        unsigned int dm_offset;
        unsigned int len_singlekey;
        bool in_place = false;
        int ret;

        /* Error checks */
        if (!cmd_q->ccp->vdata->perform->des3)
                return -EINVAL;

        if (des3->key_len != DES3_EDE_KEY_SIZE)
                return -EINVAL;

        if (((des3->mode == CCP_DES3_MODE_ECB) ||
             (des3->mode == CCP_DES3_MODE_CBC)) &&
            (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
                return -EINVAL;

        if (!des3->key || !des3->src || !des3->dst)
                return -EINVAL;

        if (des3->mode != CCP_DES3_MODE_ECB) {
                if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
                        return -EINVAL;

                if (!des3->iv)
                        return -EINVAL;
        }

        ret = -EIO;
        /* Zero out all the fields of the command desc */
        memset(&op, 0, sizeof(op));

        /* Set up the Function field */
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
        op.sb_key = cmd_q->sb_key;

        op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
        op.u.des3.type = des3->type;
        op.u.des3.mode = des3->mode;
        op.u.des3.action = des3->action;

        /*
         * All supported key sizes fit in a single (32-byte) KSB entry and
         * (like AES) must be in little endian format. Use the 256-bit byte
         * swap passthru option to convert from big endian to little endian.
         */
        ret = ccp_init_dm_workarea(&key, cmd_q,
                                   CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
                                   DMA_TO_DEVICE);
        if (ret)
                return ret;

        /*
         * The contents of the key triplet are in the reverse order of what
         * is required by the engine. Copy the 3 pieces individually to put
         * them where they belong.
         */
        dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */

        len_singlekey = des3->key_len / 3;
        ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
                        des3->key, 0, len_singlekey);
        ccp_set_dm_area(&key, dm_offset + len_singlekey,
                        des3->key, len_singlekey, len_singlekey);
        ccp_set_dm_area(&key, dm_offset,
                        des3->key, 2 * len_singlekey, len_singlekey);

        /* Copy the key to the SB */
        ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_key;
        }

        /*
         * The DES3 context fits in a single (32-byte) KSB entry and
         * must be in little endian format. Use the 256-bit byte swap
         * passthru option to convert from big endian to little endian.
         */
        if (des3->mode != CCP_DES3_MODE_ECB) {
                u32 load_mode;

                op.sb_ctx = cmd_q->sb_ctx;

                ret = ccp_init_dm_workarea(&ctx, cmd_q,
                                           CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
                                           DMA_BIDIRECTIONAL);
                if (ret)
                        goto e_key;

                /* Load the context into the LSB */
                dm_offset = CCP_SB_BYTES - des3->iv_len;
                ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);

                if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
                        load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
                else
                        load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
                ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                                     load_mode);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_ctx;
                }
        }

        /*
         * Prepare the input and output data workareas. For in-place
         * operations we need to set the dma direction to BIDIRECTIONAL
         * and copy the src workarea to the dst workarea.
         */
        if (sg_virt(des3->src) == sg_virt(des3->dst))
                in_place = true;

        ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
                            DES3_EDE_BLOCK_SIZE,
                            in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
        if (ret)
                goto e_ctx;

        if (in_place) {
                dst = src;
        } else {
                ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
                                    DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
                if (ret)
                        goto e_src;
        }

        /* Send data to the CCP DES3 engine */
        while (src.sg_wa.bytes_left) {
                ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
                if (!src.sg_wa.bytes_left) {
                        op.eom = 1;

                        /* Since we don't retrieve the context in ECB mode
                         * we have to wait for the operation to complete
                         * on the last piece of data
                         */
                        op.soc = 0;
                }

                ret = cmd_q->ccp->vdata->perform->des3(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_dst;
                }

                ccp_process_data(&src, &dst, &op);
        }

        if (des3->mode != CCP_DES3_MODE_ECB) {
                /* Retrieve the context and make BE */
                ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                                       CCP_PASSTHRU_BYTESWAP_256BIT);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_dst;
                }

                /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
                if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
                        dm_offset = CCP_SB_BYTES - des3->iv_len;
                else
                        dm_offset = 0;
                ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
                                DES3_EDE_BLOCK_SIZE);
        }
e_dst:
        if (!in_place)
                ccp_free_data(&dst, cmd_q);

e_src:
        ccp_free_data(&src, cmd_q);

e_ctx:
        if (des3->mode != CCP_DES3_MODE_ECB)
                ccp_dm_free(&ctx);

e_key:
        ccp_dm_free(&key);

        return ret;
}

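/* Run a SHA operation. The intermediate digest lives in one (SHA-1/224/256)
 * or two (SHA-384/512) storage block entries; it is seeded with the initial
 * hash values on the first pass, restored from the caller's context
 * otherwise, and stashed back (or finalized, with an optional recursive
 * HMAC pass over opad || digest) at the end.
 */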
static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
        struct ccp_sha_engine *sha = &cmd->u.sha;
        struct ccp_dm_workarea ctx;
        struct ccp_data src;
        struct ccp_op op;
        unsigned int ioffset, ooffset;
        unsigned int digest_size;
        int sb_count;
        const void *init;
        u64 block_size;
        int ctx_size;
        int ret;

        switch (sha->type) {
        case CCP_SHA_TYPE_1:
                if (sha->ctx_len < SHA1_DIGEST_SIZE)
                        return -EINVAL;
                block_size = SHA1_BLOCK_SIZE;
                break;
        case CCP_SHA_TYPE_224:
                if (sha->ctx_len < SHA224_DIGEST_SIZE)
                        return -EINVAL;
                block_size = SHA224_BLOCK_SIZE;
                break;
        case CCP_SHA_TYPE_256:
                if (sha->ctx_len < SHA256_DIGEST_SIZE)
                        return -EINVAL;
                block_size = SHA256_BLOCK_SIZE;
                break;
        case CCP_SHA_TYPE_384:
                if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
                    || sha->ctx_len < SHA384_DIGEST_SIZE)
                        return -EINVAL;
                block_size = SHA384_BLOCK_SIZE;
                break;
        case CCP_SHA_TYPE_512:
                if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
                    || sha->ctx_len < SHA512_DIGEST_SIZE)
                        return -EINVAL;
                block_size = SHA512_BLOCK_SIZE;
                break;
        default:
                return -EINVAL;
        }

        if (!sha->ctx)
                return -EINVAL;

        if (!sha->final && (sha->src_len & (block_size - 1)))
                return -EINVAL;

        /* The version 3 device can't handle zero-length input */
        if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

                if (!sha->src_len) {
                        unsigned int digest_len;
                        const u8 *sha_zero;

                        /* Not final, just return */
                        if (!sha->final)
                                return 0;

                        /* CCP can't do a zero length sha operation so the
                         * caller must buffer the data.
                         */
                        if (sha->msg_bits)
                                return -EINVAL;

                        /* The CCP cannot perform zero-length sha operations
                         * so the caller is required to buffer data for the
                         * final operation. However, a sha operation for a
                         * message with a total length of zero is valid so
                         * known values are required to supply the result.
                         */
                        switch (sha->type) {
                        case CCP_SHA_TYPE_1:
                                sha_zero = sha1_zero_message_hash;
                                digest_len = SHA1_DIGEST_SIZE;
                                break;
                        case CCP_SHA_TYPE_224:
                                sha_zero = sha224_zero_message_hash;
                                digest_len = SHA224_DIGEST_SIZE;
                                break;
                        case CCP_SHA_TYPE_256:
                                sha_zero = sha256_zero_message_hash;
                                digest_len = SHA256_DIGEST_SIZE;
                                break;
                        default:
                                return -EINVAL;
                        }

                        scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
                                                 digest_len, 1);

                        return 0;
                }
        }

        /* Set variables used throughout */
        switch (sha->type) {
        case CCP_SHA_TYPE_1:
                digest_size = SHA1_DIGEST_SIZE;
                init = (void *) ccp_sha1_init;
                ctx_size = SHA1_DIGEST_SIZE;
                sb_count = 1;
                if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
                        ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
                else
                        ooffset = ioffset = 0;
                break;
        case CCP_SHA_TYPE_224:
                digest_size = SHA224_DIGEST_SIZE;
                init = (void *) ccp_sha224_init;
                ctx_size = SHA256_DIGEST_SIZE;
                sb_count = 1;
                ioffset = 0;
                if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
                        ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
                else
                        ooffset = 0;
                break;
        case CCP_SHA_TYPE_256:
                digest_size = SHA256_DIGEST_SIZE;
                init = (void *) ccp_sha256_init;
                ctx_size = SHA256_DIGEST_SIZE;
                sb_count = 1;
                ooffset = ioffset = 0;
                break;
        case CCP_SHA_TYPE_384:
                digest_size = SHA384_DIGEST_SIZE;
                init = (void *) ccp_sha384_init;
                ctx_size = SHA512_DIGEST_SIZE;
                sb_count = 2;
                ioffset = 0;
                ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
                break;
        case CCP_SHA_TYPE_512:
                digest_size = SHA512_DIGEST_SIZE;
                init = (void *) ccp_sha512_init;
                ctx_size = SHA512_DIGEST_SIZE;
                sb_count = 2;
                ooffset = ioffset = 0;
                break;
        default:
                ret = -EINVAL;
                goto e_data;
        }

        /* For zero-length plaintext the src pointer is ignored;
         * otherwise both parts must be valid
         */
        if (sha->src_len && !sha->src)
                return -EINVAL;

        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
        op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
        op.u.sha.type = sha->type;
        op.u.sha.msg_bits = sha->msg_bits;

        /* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
         * SHA384/512 require 2 adjacent SB slots, with the right half in the
         * first slot, and the left half in the second. Each portion must then
         * be in little endian format: use the 256-bit byte swap option.
         */
        ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
                                   DMA_BIDIRECTIONAL);
        if (ret)
                return ret;
        if (sha->first) {
                switch (sha->type) {
                case CCP_SHA_TYPE_1:
                case CCP_SHA_TYPE_224:
                case CCP_SHA_TYPE_256:
                        memcpy(ctx.address + ioffset, init, ctx_size);
                        break;
                case CCP_SHA_TYPE_384:
                case CCP_SHA_TYPE_512:
                        memcpy(ctx.address + ctx_size / 2, init,
                               ctx_size / 2);
                        memcpy(ctx.address, init + ctx_size / 2,
                               ctx_size / 2);
                        break;
                default:
                        ret = -EINVAL;
                        goto e_ctx;
                }
        } else {
                /* Restore the context */
                ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
                                sb_count * CCP_SB_BYTES);
        }

        ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_ctx;
        }

        if (sha->src) {
                /* Send data to the CCP SHA engine; block_size is set above */
                ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
                                    block_size, DMA_TO_DEVICE);
                if (ret)
                        goto e_ctx;

                while (src.sg_wa.bytes_left) {
                        ccp_prepare_data(&src, NULL, &op, block_size, false);
                        if (sha->final && !src.sg_wa.bytes_left)
                                op.eom = 1;

                        ret = cmd_q->ccp->vdata->perform->sha(&op);
                        if (ret) {
                                cmd->engine_error = cmd_q->cmd_error;
                                goto e_data;
                        }

                        ccp_process_data(&src, NULL, &op);
                }
        } else {
                op.eom = 1;
                ret = cmd_q->ccp->vdata->perform->sha(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_data;
                }
        }

        /* Retrieve the SHA context - convert from LE to BE using
         * 32-byte (256-bit) byteswapping to BE
         */
        ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                               CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_data;
        }

        if (sha->final) {
                /* Finishing up, so get the digest */
                switch (sha->type) {
                case CCP_SHA_TYPE_1:
                case CCP_SHA_TYPE_224:
                case CCP_SHA_TYPE_256:
                        ccp_get_dm_area(&ctx, ooffset,
                                        sha->ctx, 0,
                                        digest_size);
                        break;
                case CCP_SHA_TYPE_384:
                case CCP_SHA_TYPE_512:
                        ccp_get_dm_area(&ctx, 0,
                                        sha->ctx, LSB_ITEM_SIZE - ooffset,
                                        LSB_ITEM_SIZE);
                        ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
                                        sha->ctx, 0,
                                        LSB_ITEM_SIZE - ooffset);
                        break;
                default:
                        ret = -EINVAL;
                        goto e_ctx;
                }
        } else {
                /* Stash the context */
                ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
                                sb_count * CCP_SB_BYTES);
        }

        if (sha->final && sha->opad) {
                /* HMAC operation, recursively perform final SHA */
                struct ccp_cmd hmac_cmd;
                struct scatterlist sg;
                u8 *hmac_buf;

                if (sha->opad_len != block_size) {
                        ret = -EINVAL;
                        goto e_data;
                }

                hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
                if (!hmac_buf) {
                        ret = -ENOMEM;
                        goto e_data;
                }
                sg_init_one(&sg, hmac_buf, block_size + digest_size);

                scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
                switch (sha->type) {
                case CCP_SHA_TYPE_1:
                case CCP_SHA_TYPE_224:
                case CCP_SHA_TYPE_256:
                        memcpy(hmac_buf + block_size,
                               ctx.address + ooffset,
                               digest_size);
                        break;
                case CCP_SHA_TYPE_384:
                case CCP_SHA_TYPE_512:
                        memcpy(hmac_buf + block_size,
                               ctx.address + LSB_ITEM_SIZE + ooffset,
                               LSB_ITEM_SIZE);
                        memcpy(hmac_buf + block_size +
                               (LSB_ITEM_SIZE - ooffset),
                               ctx.address,
                               LSB_ITEM_SIZE);
                        break;
                default:
                        kfree(hmac_buf);
                        ret = -EINVAL;
                        goto e_data;
                }

                memset(&hmac_cmd, 0, sizeof(hmac_cmd));
                hmac_cmd.engine = CCP_ENGINE_SHA;
                hmac_cmd.u.sha.type = sha->type;
                hmac_cmd.u.sha.ctx = sha->ctx;
                hmac_cmd.u.sha.ctx_len = sha->ctx_len;
                hmac_cmd.u.sha.src = &sg;
                hmac_cmd.u.sha.src_len = block_size + digest_size;
                hmac_cmd.u.sha.opad = NULL;
                hmac_cmd.u.sha.opad_len = 0;
                hmac_cmd.u.sha.first = 1;
                hmac_cmd.u.sha.final = 1;
                hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

                ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
                if (ret)
                        cmd->engine_error = hmac_cmd.engine_error;

                kfree(hmac_buf);
        }

e_data:
        if (sha->src)
                ccp_free_data(&src, cmd_q);

e_ctx:
        ccp_dm_free(&ctx);

        return ret;
}

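/* Run an RSA exponentiation: the modulus and message are byte-reversed
 * into little endian and concatenated in one DMA buffer, the exponent is
 * loaded into the LSB (v3) or fetched by DMA (v5), and the result is
 * reversed back into the caller's buffer.
 */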
static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
        struct ccp_rsa_engine *rsa = &cmd->u.rsa;
        struct ccp_dm_workarea exp, src, dst;
        struct ccp_op op;
        unsigned int sb_count, i_len, o_len;
        int ret;

        /* Check against the maximum allowable size, in bits */
        if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
                return -EINVAL;

        if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
                return -EINVAL;

        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

        /* The RSA modulus must precede the message being acted upon, so
         * it must be copied to a DMA area where the message and the
         * modulus can be concatenated. Therefore the input buffer
         * length required is twice the output buffer length (which
         * must be a multiple of 256-bits). Compute o_len, i_len in bytes.
         * Buffer sizes must be a multiple of 32 bytes; rounding up may be
         * required.
         */
        o_len = 32 * ((rsa->key_size + 255) / 256);
        i_len = o_len * 2;

        sb_count = 0;
        if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
                /* sb_count is the number of storage block slots required
                 * for the modulus.
                 */
                sb_count = o_len / CCP_SB_BYTES;
                op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
                                                                sb_count);
                if (!op.sb_key)
                        return -EIO;
        } else {
                /* A version 5 device allows a modulus size that will not fit
                 * in the LSB, so the command will transfer it from memory.
                 * Set the sb key to the default, even though it's not used.
                 */
                op.sb_key = cmd_q->sb_key;
        }

        /* The RSA exponent must be in little endian format. Reverse its
         * byte order.
         */
        ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
        if (ret)
                goto e_sb;

        ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
        if (ret)
                goto e_exp;

        if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
                /* Copy the exponent to the local storage block, using
                 * as many 32-byte blocks as were allocated above. It's
                 * already little endian, so no further change is required.
                 */
                ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
                                     CCP_PASSTHRU_BYTESWAP_NOOP);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_exp;
                }
        } else {
                /* The exponent can be retrieved from memory via DMA. */
                op.exp.u.dma.address = exp.dma.address;
                op.exp.u.dma.offset = 0;
        }

        /* Concatenate the modulus and the message. Both the modulus and
         * the operands must be in little endian format. Since the input
         * is in big endian format it must be converted.
         */
        ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
        if (ret)
                goto e_exp;

        ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
        if (ret)
                goto e_src;
        ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
        if (ret)
                goto e_src;

        /* Prepare the output area for the operation */
        ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
        if (ret)
                goto e_src;

        op.soc = 1;
        op.src.u.dma.address = src.dma.address;
        op.src.u.dma.offset = 0;
        op.src.u.dma.length = i_len;
        op.dst.u.dma.address = dst.dma.address;
        op.dst.u.dma.offset = 0;
        op.dst.u.dma.length = o_len;

        op.u.rsa.mod_size = rsa->key_size;
        op.u.rsa.input_len = i_len;

        ret = cmd_q->ccp->vdata->perform->rsa(&op);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_dst;
        }

        ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);

e_dst:
        ccp_dm_free(&dst);

e_src:
        ccp_dm_free(&src);

e_exp:
        ccp_dm_free(&exp);

e_sb:
        if (sb_count)
                cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

        return ret;
}

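/* Run a pass-through (copy/byte-swap/bit-mask) operation. Each source sg
 * entry maps to one engine operation, so every source entry must fit
 * within the remaining length of the corresponding destination entry.
 */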
1899static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
1900 struct ccp_cmd *cmd)
1901{
1902 struct ccp_passthru_engine *pt = &cmd->u.passthru;
1903 struct ccp_dm_workarea mask;
1904 struct ccp_data src, dst;
1905 struct ccp_op op;
1906 bool in_place = false;
1907 unsigned int i;
4b394a23 1908 int ret = 0;
63b94509
TL
1909
1910 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
1911 return -EINVAL;
1912
1913 if (!pt->src || !pt->dst)
1914 return -EINVAL;
1915
1916 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1917 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
1918 return -EINVAL;
1919 if (!pt->mask)
1920 return -EINVAL;
1921 }
1922
956ee21a 1923 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
63b94509
TL
1924
1925 memset(&op, 0, sizeof(op));
1926 op.cmd_q = cmd_q;
4b394a23 1927 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
63b94509
TL
1928
1929 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1930 /* Load the mask */
956ee21a 1931 op.sb_key = cmd_q->sb_key;
63b94509
TL
1932
1933 ret = ccp_init_dm_workarea(&mask, cmd_q,
956ee21a
GH
1934 CCP_PASSTHRU_SB_COUNT *
1935 CCP_SB_BYTES,
63b94509
TL
1936 DMA_TO_DEVICE);
1937 if (ret)
1938 return ret;
1939
1940 ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
956ee21a
GH
1941 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
1942 CCP_PASSTHRU_BYTESWAP_NOOP);
63b94509
TL
1943 if (ret) {
1944 cmd->engine_error = cmd_q->cmd_error;
1945 goto e_mask;
1946 }
1947 }
1948
1949 /* Prepare the input and output data workareas. For in-place
1950 * operations, we need to set the dma direction to BIDIRECTIONAL
1951 * and copy the src workarea to the dst workarea.
1952 */
1953 if (sg_virt(pt->src) == sg_virt(pt->dst))
1954 in_place = true;
1955
1956 ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
1957 CCP_PASSTHRU_MASKSIZE,
1958 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1959 if (ret)
1960 goto e_mask;
1961
8db88467 1962 if (in_place) {
63b94509 1963 dst = src;
8db88467 1964 } else {
63b94509
TL
1965 ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
1966 CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
1967 if (ret)
1968 goto e_src;
1969 }
1970
1971 /* Send data to the CCP Passthru engine.
1972 * Because the CCP engine works on a single source and destination
1973 * dma address at a time, the length of each entry in the source
1974 * scatterlist (after the dma_map_sg call) must be less than or
1975 * equal to the (remaining) length of the destination scatterlist
1976 * entry, and the length must be a multiple of CCP_PASSTHRU_BLOCKSIZE.
1977 */
1978 dst.sg_wa.sg_used = 0;
1979 for (i = 1; i <= src.sg_wa.dma_count; i++) {
1980 if (!dst.sg_wa.sg ||
1981 (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
1982 ret = -EINVAL;
1983 goto e_dst;
1984 }
1985
1986 if (i == src.sg_wa.dma_count) {
1987 op.eom = 1;
1988 op.soc = 1;
1989 }
1990
1991 op.src.type = CCP_MEMTYPE_SYSTEM;
1992 op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
1993 op.src.u.dma.offset = 0;
1994 op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);
1995
1996 op.dst.type = CCP_MEMTYPE_SYSTEM;
1997 op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
80e84c16
DJ
1998 op.dst.u.dma.offset = dst.sg_wa.sg_used;
1999 op.dst.u.dma.length = op.src.u.dma.length;
63b94509 2000
a43eb985 2001 ret = cmd_q->ccp->vdata->perform->passthru(&op);
63b94509
TL
2002 if (ret) {
2003 cmd->engine_error = cmd_q->cmd_error;
2004 goto e_dst;
2005 }
2006
2007 dst.sg_wa.sg_used += src.sg_wa.sg->length;
2008 if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
2009 dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
2010 dst.sg_wa.sg_used = 0;
2011 }
2012 src.sg_wa.sg = sg_next(src.sg_wa.sg);
2013 }
2014
2015e_dst:
2016 if (!in_place)
2017 ccp_free_data(&dst, cmd_q);
2018
2019e_src:
2020 ccp_free_data(&src, cmd_q);
2021
2022e_mask:
2023 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
2024 ccp_dm_free(&mask);
2025
2026 return ret;
2027}
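/* Editor's note: a minimal sketch of the per-entry test the passthru
 * loop above applies before each submission; it mirrors the -EINVAL
 * check on the destination scatterlist. The standalone form is
 * illustrative only.
 */
static bool ccp_sketch_passthru_entry_ok(struct scatterlist *src_sg,
					 struct scatterlist *dst_sg)
{
	/* A destination entry must exist and be at least as long as
	 * the current source entry, since each CCP op consumes one
	 * source entry in full.
	 */
	return dst_sg && (dst_sg->length >= src_sg->length);
}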
2028
58ea8abf
GH
2029static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
2030 struct ccp_cmd *cmd)
2031{
2032 struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
2033 struct ccp_dm_workarea mask;
2034 struct ccp_op op;
2035 int ret;
2036
2037 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
2038 return -EINVAL;
2039
2040 if (!pt->src_dma || !pt->dst_dma)
2041 return -EINVAL;
2042
2043 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
2044 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
2045 return -EINVAL;
2046 if (!pt->mask)
2047 return -EINVAL;
2048 }
2049
956ee21a 2050 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
58ea8abf
GH
2051
2052 memset(&op, 0, sizeof(op));
2053 op.cmd_q = cmd_q;
bce386af 2054 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
58ea8abf
GH
2055
2056 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
2057 /* Load the mask */
956ee21a 2058 op.sb_key = cmd_q->sb_key;
58ea8abf
GH
2059
2060 mask.length = pt->mask_len;
2061 mask.dma.address = pt->mask;
2062 mask.dma.length = pt->mask_len;
2063
956ee21a 2064 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
58ea8abf
GH
2065 CCP_PASSTHRU_BYTESWAP_NOOP);
2066 if (ret) {
2067 cmd->engine_error = cmd_q->cmd_error;
2068 return ret;
2069 }
2070 }
2071
2072 /* Send data to the CCP Passthru engine */
2073 op.eom = 1;
2074 op.soc = 1;
2075
2076 op.src.type = CCP_MEMTYPE_SYSTEM;
2077 op.src.u.dma.address = pt->src_dma;
2078 op.src.u.dma.offset = 0;
2079 op.src.u.dma.length = pt->src_len;
2080
2081 op.dst.type = CCP_MEMTYPE_SYSTEM;
2082 op.dst.u.dma.address = pt->dst_dma;
2083 op.dst.u.dma.offset = 0;
2084 op.dst.u.dma.length = pt->src_len;
2085
a43eb985 2086 ret = cmd_q->ccp->vdata->perform->passthru(&op);
58ea8abf
GH
2087 if (ret)
2088 cmd->engine_error = cmd_q->cmd_error;
2089
2090 return ret;
2091}
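/* Editor's note: a hedged usage sketch for the no-map path above. The
 * caller owns the DMA mapping, so src_dma and dst_dma are assumed to
 * be device-visible bus addresses obtained earlier (e.g. from
 * dma_map_single()). Field spellings follow their use in this file;
 * any engine defaults not shown here are assumptions.
 */
static void ccp_sketch_fill_nomap_cmd(struct ccp_cmd *cmd,
				      dma_addr_t src_dma, dma_addr_t dst_dma,
				      u64 len)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->engine = CCP_ENGINE_PASSTHRU;
	cmd->flags = CCP_CMD_PASSTHRU_NO_DMA_MAP;	/* selects this path */
	cmd->u.passthru_nomap.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
	cmd->u.passthru_nomap.src_dma = src_dma;
	cmd->u.passthru_nomap.dst_dma = dst_dma;
	cmd->u.passthru_nomap.src_len = len;
	cmd->u.passthru_nomap.final = 1;	/* len need not be block aligned */
}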
2092
63b94509
TL
2093static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
2094{
2095 struct ccp_ecc_engine *ecc = &cmd->u.ecc;
2096 struct ccp_dm_workarea src, dst;
2097 struct ccp_op op;
2098 int ret;
2099 u8 *save;
2100
2101 if (!ecc->u.mm.operand_1 ||
2102 (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
2103 return -EINVAL;
2104
2105 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
2106 if (!ecc->u.mm.operand_2 ||
2107 (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
2108 return -EINVAL;
2109
2110 if (!ecc->u.mm.result ||
2111 (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
2112 return -EINVAL;
2113
2114 memset(&op, 0, sizeof(op));
2115 op.cmd_q = cmd_q;
4b394a23 2116 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
63b94509
TL
2117
2118 /* Concatenate the modulus and the operands. Both the modulus and
2119 * the operands must be in little endian format. Since the input
2120 * is in big endian format, it must be converted and placed in a
2121 * fixed length buffer.
2122 */
2123 ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
2124 DMA_TO_DEVICE);
2125 if (ret)
2126 return ret;
2127
2128 /* Save the workarea address since it is updated in order to perform
2129 * the concatenation
2130 */
2131 save = src.address;
2132
2133 /* Copy the ECC modulus */
83d650ab 2134 ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
355eba5d
TL
2135 if (ret)
2136 goto e_src;
63b94509
TL
2137 src.address += CCP_ECC_OPERAND_SIZE;
2138
2139 /* Copy the first operand */
83d650ab
GH
2140 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
2141 ecc->u.mm.operand_1_len);
355eba5d
TL
2142 if (ret)
2143 goto e_src;
63b94509
TL
2144 src.address += CCP_ECC_OPERAND_SIZE;
2145
2146 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
2147 /* Copy the second operand */
83d650ab
GH
2148 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
2149 ecc->u.mm.operand_2_len);
355eba5d
TL
2150 if (ret)
2151 goto e_src;
63b94509
TL
2152 src.address += CCP_ECC_OPERAND_SIZE;
2153 }
2154
2155 /* Restore the workarea address */
2156 src.address = save;
2157
2158 /* Prepare the output area for the operation */
2159 ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
2160 DMA_FROM_DEVICE);
2161 if (ret)
2162 goto e_src;
2163
2164 op.soc = 1;
2165 op.src.u.dma.address = src.dma.address;
2166 op.src.u.dma.offset = 0;
2167 op.src.u.dma.length = src.length;
2168 op.dst.u.dma.address = dst.dma.address;
2169 op.dst.u.dma.offset = 0;
2170 op.dst.u.dma.length = dst.length;
2171
2172 op.u.ecc.function = cmd->u.ecc.function;
2173
a43eb985 2174 ret = cmd_q->ccp->vdata->perform->ecc(&op);
63b94509
TL
2175 if (ret) {
2176 cmd->engine_error = cmd_q->cmd_error;
2177 goto e_dst;
2178 }
2179
2180 ecc->ecc_result = le16_to_cpup(
2181 (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
2182 if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
2183 ret = -EIO;
2184 goto e_dst;
2185 }
2186
2187 /* Save the ECC result */
83d650ab
GH
2188 ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
2189 CCP_ECC_MODULUS_BYTES);
63b94509
TL
2190
2191e_dst:
2192 ccp_dm_free(&dst);
2193
2194e_src:
2195 ccp_dm_free(&src);
2196
2197 return ret;
2198}
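/* Editor's note: an illustrative map of the source buffer assembled by
 * ccp_run_ecc_mm_cmd() above. Each value is reversed into little
 * endian order and occupies one fixed CCP_ECC_OPERAND_SIZE slot, so
 * every offset is a multiple of the slot size. The enum is a sketch,
 * not part of the driver.
 */
enum ccp_sketch_ecc_mm_slot {
	CCP_SKETCH_MM_MOD = 0 * CCP_ECC_OPERAND_SIZE,	/* modulus */
	CCP_SKETCH_MM_OP1 = 1 * CCP_ECC_OPERAND_SIZE,	/* operand_1 */
	CCP_SKETCH_MM_OP2 = 2 * CCP_ECC_OPERAND_SIZE,	/* operand_2; absent for MINV */
};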
2199
2200static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
2201{
2202 struct ccp_ecc_engine *ecc = &cmd->u.ecc;
2203 struct ccp_dm_workarea src, dst;
2204 struct ccp_op op;
2205 int ret;
2206 u8 *save;
2207
2208 if (!ecc->u.pm.point_1.x ||
2209 (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
2210 !ecc->u.pm.point_1.y ||
2211 (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
2212 return -EINVAL;
2213
2214 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
2215 if (!ecc->u.pm.point_2.x ||
2216 (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
2217 !ecc->u.pm.point_2.y ||
2218 (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
2219 return -EINVAL;
2220 } else {
2221 if (!ecc->u.pm.domain_a ||
2222 (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
2223 return -EINVAL;
2224
2225 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
2226 if (!ecc->u.pm.scalar ||
2227 (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
2228 return -EINVAL;
2229 }
2230
2231 if (!ecc->u.pm.result.x ||
2232 (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
2233 !ecc->u.pm.result.y ||
2234 (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
2235 return -EINVAL;
2236
2237 memset(&op, 0, sizeof(op));
2238 op.cmd_q = cmd_q;
4b394a23 2239 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
63b94509
TL
2240
2241 /* Concatenate the modulus and the operands. Both the modulus and
2242 * the operands must be in little endian format. Since the input
2243 * is in big endian format, it must be converted and placed in a
2244 * fixed length buffer.
2245 */
2246 ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
2247 DMA_TO_DEVICE);
2248 if (ret)
2249 return ret;
2250
2251 /* Save the workarea address since it is updated in order to perform
2252 * the concatenation
2253 */
2254 save = src.address;
2255
2256 /* Copy the ECC modulus */
83d650ab 2257 ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
355eba5d
TL
2258 if (ret)
2259 goto e_src;
63b94509
TL
2260 src.address += CCP_ECC_OPERAND_SIZE;
2261
2262 /* Copy the first point X and Y coordinate */
83d650ab
GH
2263 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
2264 ecc->u.pm.point_1.x_len);
355eba5d
TL
2265 if (ret)
2266 goto e_src;
63b94509 2267 src.address += CCP_ECC_OPERAND_SIZE;
83d650ab
GH
2268 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
2269 ecc->u.pm.point_1.y_len);
355eba5d
TL
2270 if (ret)
2271 goto e_src;
63b94509
TL
2272 src.address += CCP_ECC_OPERAND_SIZE;
2273
4b394a23 2274 /* Set the first point Z coordinate to 1 */
8db88467 2275 *src.address = 0x01;
63b94509
TL
2276 src.address += CCP_ECC_OPERAND_SIZE;
2277
2278 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
2279 /* Copy the second point X and Y coordinate */
83d650ab
GH
2280 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
2281 ecc->u.pm.point_2.x_len);
355eba5d
TL
2282 if (ret)
2283 goto e_src;
63b94509 2284 src.address += CCP_ECC_OPERAND_SIZE;
83d650ab
GH
2285 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
2286 ecc->u.pm.point_2.y_len);
355eba5d
TL
2287 if (ret)
2288 goto e_src;
63b94509
TL
2289 src.address += CCP_ECC_OPERAND_SIZE;
2290
4b394a23 2291 /* Set the second point Z coordinate to 1 */
8db88467 2292 *src.address = 0x01;
63b94509
TL
2293 src.address += CCP_ECC_OPERAND_SIZE;
2294 } else {
2295 /* Copy the Domain "a" parameter */
83d650ab
GH
2296 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
2297 ecc->u.pm.domain_a_len);
355eba5d
TL
2298 if (ret)
2299 goto e_src;
63b94509
TL
2300 src.address += CCP_ECC_OPERAND_SIZE;
2301
2302 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
2303 /* Copy the scalar value */
83d650ab
GH
2304 ret = ccp_reverse_set_dm_area(&src, 0,
2305 ecc->u.pm.scalar, 0,
2306 ecc->u.pm.scalar_len);
355eba5d
TL
2307 if (ret)
2308 goto e_src;
63b94509
TL
2309 src.address += CCP_ECC_OPERAND_SIZE;
2310 }
2311 }
2312
2313 /* Restore the workarea address */
2314 src.address = save;
2315
2316 /* Prepare the output area for the operation */
2317 ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
2318 DMA_FROM_DEVICE);
2319 if (ret)
2320 goto e_src;
2321
2322 op.soc = 1;
2323 op.src.u.dma.address = src.dma.address;
2324 op.src.u.dma.offset = 0;
2325 op.src.u.dma.length = src.length;
2326 op.dst.u.dma.address = dst.dma.address;
2327 op.dst.u.dma.offset = 0;
2328 op.dst.u.dma.length = dst.length;
2329
2330 op.u.ecc.function = cmd->u.ecc.function;
2331
a43eb985 2332 ret = cmd_q->ccp->vdata->perform->ecc(&op);
63b94509
TL
2333 if (ret) {
2334 cmd->engine_error = cmd_q->cmd_error;
2335 goto e_dst;
2336 }
2337
2338 ecc->ecc_result = le16_to_cpup(
2339 (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
2340 if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
2341 ret = -EIO;
2342 goto e_dst;
2343 }
2344
2345 /* Save the workarea address since it is updated as we walk through
2346 * to copy the point math result
2347 */
2348 save = dst.address;
2349
2350 /* Save the ECC result X and Y coordinates */
83d650ab 2351 ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
63b94509
TL
2352 CCP_ECC_MODULUS_BYTES);
2353 dst.address += CCP_ECC_OUTPUT_SIZE;
83d650ab 2354 ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
63b94509
TL
2355 CCP_ECC_MODULUS_BYTES);
2356 dst.address += CCP_ECC_OUTPUT_SIZE;
2357
2358 /* Restore the workarea address */
2359 dst.address = save;
2360
2361e_dst:
2362 ccp_dm_free(&dst);
2363
2364e_src:
2365 ccp_dm_free(&src);
2366
2367 return ret;
2368}
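/* Editor's note: in the point-math path above, each input point's Z
 * coordinate is supplied implicitly: its CCP_ECC_OPERAND_SIZE slot is
 * left zeroed and the first byte is set to 0x01, i.e. the value 1 in
 * little endian order. A minimal sketch of that step:
 */
static void ccp_sketch_set_z_one(u8 *slot)
{
	memset(slot, 0, CCP_ECC_OPERAND_SIZE);	/* clear the whole slot */
	slot[0] = 0x01;				/* low byte first: LE 1 */
}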
2369
2370static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
2371{
2372 struct ccp_ecc_engine *ecc = &cmd->u.ecc;
2373
2374 ecc->ecc_result = 0;
2375
2376 if (!ecc->mod ||
2377 (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
2378 return -EINVAL;
2379
2380 switch (ecc->function) {
2381 case CCP_ECC_FUNCTION_MMUL_384BIT:
2382 case CCP_ECC_FUNCTION_MADD_384BIT:
2383 case CCP_ECC_FUNCTION_MINV_384BIT:
2384 return ccp_run_ecc_mm_cmd(cmd_q, cmd);
2385
2386 case CCP_ECC_FUNCTION_PADD_384BIT:
2387 case CCP_ECC_FUNCTION_PMUL_384BIT:
2388 case CCP_ECC_FUNCTION_PDBL_384BIT:
2389 return ccp_run_ecc_pm_cmd(cmd_q, cmd);
2390
2391 default:
2392 return -EINVAL;
2393 }
2394}
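/* Editor's note: a hedged sketch of how both ECC paths above decode
 * the engine's completion word: a little endian 16-bit value at
 * CCP_ECC_RESULT_OFFSET in the output buffer, tested against the
 * CCP_ECC_RESULT_SUCCESS bit.
 */
static int ccp_sketch_ecc_status(const u8 *dst)
{
	u16 result = le16_to_cpup((const __le16 *)(dst + CCP_ECC_RESULT_OFFSET));

	return (result & CCP_ECC_RESULT_SUCCESS) ? 0 : -EIO;
}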
2395
2396int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
2397{
2398 int ret;
2399
2400 cmd->engine_error = 0;
2401 cmd_q->cmd_error = 0;
2402 cmd_q->int_rcvd = 0;
bb4e89b3 2403 cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);
63b94509
TL
2404
2405 switch (cmd->engine) {
2406 case CCP_ENGINE_AES:
2407 ret = ccp_run_aes_cmd(cmd_q, cmd);
2408 break;
2409 case CCP_ENGINE_XTS_AES_128:
2410 ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
2411 break;
990672d4
GH
2412 case CCP_ENGINE_DES3:
2413 ret = ccp_run_des3_cmd(cmd_q, cmd);
2414 break;
63b94509
TL
2415 case CCP_ENGINE_SHA:
2416 ret = ccp_run_sha_cmd(cmd_q, cmd);
2417 break;
2418 case CCP_ENGINE_RSA:
2419 ret = ccp_run_rsa_cmd(cmd_q, cmd);
2420 break;
2421 case CCP_ENGINE_PASSTHRU:
58ea8abf
GH
2422 if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
2423 ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
2424 else
2425 ret = ccp_run_passthru_cmd(cmd_q, cmd);
63b94509
TL
2426 break;
2427 case CCP_ENGINE_ECC:
2428 ret = ccp_run_ecc_cmd(cmd_q, cmd);
2429 break;
2430 default:
2431 ret = -EINVAL;
2432 }
2433
2434 return ret;
2435}
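/* Editor's note: a minimal caller-side sketch, assuming an initialized
 * cmd_q and a fully populated ccp_cmd. ccp_run_cmd() returns 0 or a
 * negative errno; on an engine failure the queue's error code is
 * copied into cmd->engine_error by the handlers above.
 */
static int ccp_sketch_submit(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret = ccp_run_cmd(cmd_q, cmd);

	if (ret)
		pr_debug("ccp: cmd failed, ret=%d engine_error=%u\n",
			 ret, cmd->engine_error);

	return ret;
}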