/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
};

static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
};

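/* A job id tags the sequence of operations issued for a single command.
 * Only version 3 devices need a driver-generated id; other versions
 * pass zero here.
 */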
#define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)

static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}

static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}

static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}

static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == wa->sg->length) {
		wa->sg = sg_next(wa->sg);
		wa->sg_used = 0;
	}
}

static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}

static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
					     &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

		memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (dma_mapping_error(wa->dev, wa->dma.address))
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}

static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			   struct scatterlist *sg, unsigned int sg_offset,
			   unsigned int len)
{
	WARN_ON(!wa->address);

	if (len > (wa->length - wa_offset))
		return -EINVAL;

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
	return 0;
}

static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}

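/* Copy data into the workarea, then reverse the byte order of the
 * copied span in place (e.g. { 0x12, 0x34, 0x56 } becomes
 * { 0x56, 0x34, 0x12 }). The loops below swap the end bytes pairwise
 * using the XOR-swap idiom, so no temporary is needed.
 */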
static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   unsigned int wa_offset,
				   struct scatterlist *sg,
				   unsigned int sg_offset,
				   unsigned int len)
{
	u8 *p, *q;
	int rc;

	rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
	if (rc)
		return rc;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}
	return 0;
}

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    unsigned int wa_offset,
				    struct scatterlist *sg,
				    unsigned int sg_offset,
				    unsigned int len)
{
	u8 *p, *q;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}

	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
}

static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}

static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 * nbytes will always be <= UINT_MAX because dm_wa->length is
	 * an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}

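/* Set up the source and destination DMA areas for the next operation.
 * When a scatterlist element has at least block_size bytes available
 * the engine reads or writes it directly; otherwise the data is
 * bounced through the workarea's DMA buffer a block at a time.
 */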
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * the destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}

static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}

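/* Move a workarea's contents into, or out of, a local storage block
 * (SB) entry using a pass-through operation, optionally byte-swapping
 * along the way (keys and contexts must be little endian on the
 * device side).
 */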
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}

static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}

static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					      aes->cmac_key_len);
			if (ret)
				goto e_src;
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx, final_wa, tag;
	struct ccp_data src, dst;
	struct ccp_data aad;
	struct ccp_op op;

	unsigned long long *final;
	unsigned int dm_offset;
	unsigned int authsize;
	unsigned int jobid;
	unsigned int ilen;
	bool in_place = true; /* Default value */
	int ret;

	struct scatterlist *p_inp, sg_inp[2];
	struct scatterlist *p_tag, sg_tag[2];
	struct scatterlist *p_outp, sg_outp[2];
	struct scatterlist *p_aad;

	if (!aes->iv)
		return -EINVAL;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (!aes->key) /* Gotta have a key SGL */
		return -EINVAL;

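	/* Per NIST SP 800-38D, a GCM tag may be 128, 120, 112, 104 or
	 * 96 bits, with 64 and 32 bits permitted in special cases, so
	 * only those sizes pass the check below.
	 */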
	/* Zero defaults to 16 bytes, the maximum size */
	authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
	switch (authsize) {
	case 16:
	case 15:
	case 14:
	case 13:
	case 12:
	case 8:
	case 4:
		break;
	default:
		return -EINVAL;
	}

	/* First, decompose the source buffer into AAD & PT,
	 * and the destination buffer into AAD, CT & tag, or
	 * the input into CT & tag.
	 * It is expected that the input and output SGs will
	 * be valid, even if the AAD and input lengths are 0.
	 */
	p_aad = aes->src;
	p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
	p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		ilen = aes->src_len;
		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
	} else {
		/* Input length for decryption includes tag */
		ilen = aes->src_len - authsize;
		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
	}

	jobid = CCP_NEW_JOBID(cmd_q->ccp);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;

	/* Copy the key to the LSB */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* Copy the context (IV) to the LSB.
	 * There is an assumption here that the IV is 96 bits in length, plus
	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	op.init = 1;
	if (aes->aad_len > 0) {
		/* Step 1: Run a GHASH over the Additional Authenticated Data */
		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
				    AES_BLOCK_SIZE,
				    DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		op.u.aes.mode = CCP_AES_MODE_GHASH;
		op.u.aes.action = CCP_AES_GHASHAAD;

		while (aad.sg_wa.bytes_left) {
			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_aad;
			}

			ccp_process_data(&aad, NULL, &op);
			op.init = 0;
		}
	}

	op.u.aes.mode = CCP_AES_MODE_GCTR;
	op.u.aes.action = aes->action;

	if (ilen > 0) {
		/* Step 2: Run a GCTR over the plaintext */
		in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;

		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
				    AES_BLOCK_SIZE,
				    in_place ? DMA_BIDIRECTIONAL
					     : DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		if (in_place) {
			dst = src;
		} else {
			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
			if (ret)
				goto e_src;
		}

		op.soc = 0;
		op.eom = 0;
		op.init = 1;
		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
			if (!src.sg_wa.bytes_left) {
				unsigned int nbytes = ilen % AES_BLOCK_SIZE;

				if (nbytes) {
					op.eom = 1;
					op.u.aes.size = (nbytes * 8) - 1;
				}
			}

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_dst;
			}

			ccp_process_data(&src, &dst, &op);
			op.init = 0;
		}
	}

	/* Step 3: Update the IV portion of the context with the original IV */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_dst;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* Step 4: Concatenate the lengths of the AAD and source, and
	 * hash that 16 byte buffer.
	 */
	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_dst;
	final = (unsigned long long *) final_wa.address;
	final[0] = cpu_to_be64(aes->aad_len * 8);
	final[1] = cpu_to_be64(ilen * 8);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = CCP_AES_MODE_GHASH;
	op.u.aes.action = CCP_AES_GHASHFINAL;
	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = final_wa.dma.address;
	op.src.u.dma.length = AES_BLOCK_SIZE;
	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = final_wa.dma.address;
	op.dst.u.dma.length = AES_BLOCK_SIZE;
	op.eom = 1;
	op.u.aes.size = 0;
	ret = cmd_q->ccp->vdata->perform->aes(&op);
	if (ret)
		goto e_dst;

	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		/* Put the ciphered tag after the ciphertext. */
		ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
	} else {
		/* Does this ciphered tag match the input? */
		ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_tag;
		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
		if (ret)
			goto e_tag;

		ret = crypto_memneq(tag.address, final_wa.address,
				    authsize) ? -EBADMSG : 0;
		ccp_dm_free(&tag);
	}

e_tag:
	ccp_dm_free(&final_wa);

e_dst:
	if (ilen > 0 && !in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	if (ilen > 0)
		ccp_free_data(&src, cmd_q);

e_aad:
	if (aes->aad_len)
		ccp_free_data(&aad, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

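/* Generic AES entry point: CMAC and GCM are dispatched to the dedicated
 * routines above; all other modes are processed inline below.
 */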
static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (aes->mode == CCP_AES_MODE_CMAC)
		return ccp_run_aes_cmac_cmd(cmd_q, cmd);

	if (aes->mode == CCP_AES_MODE_GCM)
		return ccp_run_aes_gcm_cmd(cmd_q, cmd);

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC) ||
	     (aes->mode == CCP_AES_MODE_CFB)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		if (ret)
			goto e_ctx;
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

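/* For XTS-AES the caller provides both halves of the key pair in a
 * single buffer, each half being key_len bytes; the halves are staged
 * into the LSB in the layout the device generation expects.
 */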
static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	unsigned int sb_count;
	enum ccp_aes_type aestype;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len == AES_KEYSIZE_128)
		aestype = CCP_AES_TYPE_128;
	else if (xts->key_len == AES_KEYSIZE_256)
		aestype = CCP_AES_TYPE_256;
	else
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.type = aestype;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* A version 3 device only supports 128-bit keys, which fits into a
	 * single SB entry. A version 5 device uses a 512-bit vector, so two
	 * SB entries.
	 */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
		sb_count = CCP_XTS_AES_KEY_SB_COUNT;
	else
		sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   sb_count * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
		/* All supported key sizes must be in little endian format.
		 * Use the 256-bit byte swap passthru option to convert from
		 * big endian to little endian.
		 */
		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
		ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	} else {
		/* Version 5 CCPs use a 512-bit space for the key: each portion
		 * occupies 256 bits, or one entire slot, and is zero-padded.
		 */
		unsigned int pad;

		dm_offset = CCP_SB_BYTES;
		pad = dm_offset - xts->key_len;
		ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
				      xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	}
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_des3_engine *des3 = &cmd->u.des3;

	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	unsigned int len_singlekey;
	bool in_place = false;
	int ret;

	/* Error checks */
	if (!cmd_q->ccp->vdata->perform->des3)
		return -EINVAL;

	if (des3->key_len != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	if (((des3->mode == CCP_DES3_MODE_ECB) ||
	     (des3->mode == CCP_DES3_MODE_CBC)) &&
	    (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!des3->key || !des3->src || !des3->dst)
		return -EINVAL;

	if (des3->mode != CCP_DES3_MODE_ECB) {
		if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
			return -EINVAL;

		if (!des3->iv)
			return -EINVAL;
	}

	ret = -EIO;
	/* Zero out all the fields of the command desc */
	memset(&op, 0, sizeof(op));

	/* Set up the Function field */
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;

	op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
	op.u.des3.type = des3->type;
	op.u.des3.mode = des3->mode;
	op.u.des3.action = des3->action;

	/*
	 * All supported key sizes fit in a single (32-byte) KSB entry and
	 * (like AES) must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/*
	 * The contents of the key triplet are in the reverse order of what
	 * is required by the engine. Copy the 3 pieces individually to put
	 * them where they belong.
	 */
	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */

	len_singlekey = des3->key_len / 3;
	ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
			      des3->key, 0, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
			      des3->key, len_singlekey, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset,
			      des3->key, 2 * len_singlekey, len_singlekey);
	if (ret)
		goto e_key;

	/* Copy the key to the SB */
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/*
	 * The DES3 context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	if (des3->mode != CCP_DES3_MODE_ECB) {
		u32 load_mode;

		op.sb_ctx = cmd_q->sb_ctx;

		ret = ccp_init_dm_workarea(&ctx, cmd_q,
					   CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_key;

		/* Load the context into the LSB */
		dm_offset = CCP_SB_BYTES - des3->iv_len;
		ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
				      des3->iv_len);
		if (ret)
			goto e_ctx;

		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
			load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
		else
			load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     load_mode);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}

	/*
	 * Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(des3->src) == sg_virt(des3->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
			    DES3_EDE_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
				    DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP DES3 engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the context in ECB mode
			 * we have to wait for the operation to complete
			 * on the last piece of data
			 */
			op.soc = 0;
		}

		ret = cmd_q->ccp->vdata->perform->des3(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (des3->mode != CCP_DES3_MODE_ECB) {
		/* Retrieve the context and make BE */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
			dm_offset = CCP_SB_BYTES - des3->iv_len;
		else
			dm_offset = 0;
		ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
				DES3_EDE_BLOCK_SIZE);
	}
e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	if (des3->mode != CCP_DES3_MODE_ECB)
		ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

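/* A SHA command either seeds the hash context from the standard
 * initial values (sha->first) or restores a previously saved context,
 * runs the supplied data through the engine, then writes back either
 * the final digest or the intermediate context. If an opad is given
 * on the final pass, the outer HMAC hash is produced by recursively
 * issuing a second SHA command.
 */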
static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;
	unsigned int digest_size;
	int sb_count;
	const void *init;
	u64 block_size;
	int ctx_size;
	int ret;

	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_384:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA384_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA384_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA512_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation. However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_384:
		digest_size = SHA384_DIGEST_SIZE;
		init = (void *) ccp_sha384_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ioffset = 0;
		ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		digest_size = SHA512_DIGEST_SIZE;
		init = (void *) ccp_sha512_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ooffset = ioffset = 0;
		break;
	default:
		ret = -EINVAL;
		goto e_data;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	/* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
	 * SHA384/512 require 2 adjacent SB slots, with the right half in the
	 * first slot, and the left half in the second. Each portion must then
	 * be in little endian format: use the 256-bit byte swap option.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;
	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(ctx.address + ctx_size / 2, init,
			       ctx_size / 2);
			memcpy(ctx.address, init + ctx_size / 2,
			       ctx_size / 2);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				      sb_count * CCP_SB_BYTES);
		if (ret)
			goto e_ctx;
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping to BE
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			ccp_get_dm_area(&ctx, 0,
					sha->ctx, LSB_ITEM_SIZE - ooffset,
					LSB_ITEM_SIZE);
			ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
					sha->ctx, 0,
					LSB_ITEM_SIZE - ooffset);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(hmac_buf + block_size,
			       ctx.address + LSB_ITEM_SIZE + ooffset,
			       LSB_ITEM_SIZE);
			memcpy(hmac_buf + block_size +
			       (LSB_ITEM_SIZE - ooffset),
			       ctx.address,
			       LSB_ITEM_SIZE);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}

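/* The RSA engine performs a modular exponentiation: the modulus and
 * message are concatenated (modulus first) into one little-endian
 * input buffer, while the exponent is staged separately, either in
 * the LSB (version 3) or fetched directly from memory (version 5).
 */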
1830static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1831{
1832 struct ccp_rsa_engine *rsa = &cmd->u.rsa;
6ba46c7d 1833 struct ccp_dm_workarea exp, src, dst;
63b94509 1834 struct ccp_op op;
956ee21a 1835 unsigned int sb_count, i_len, o_len;
63b94509
TL
1836 int ret;
1837
e28c190d
GH
1838 /* Check against the maximum allowable size, in bits */
1839 if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
63b94509
TL
1840 return -EINVAL;
1841
1842 if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
1843 return -EINVAL;
1844
6ba46c7d
GH
1845 memset(&op, 0, sizeof(op));
1846 op.cmd_q = cmd_q;
1847 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1848
63b94509
TL
1849 /* The RSA modulus must precede the message being acted upon, so
1850 * it must be copied to a DMA area where the message and the
1851 * modulus can be concatenated. Therefore the input buffer
1852 * length required is twice the output buffer length (which
6ba46c7d
GH
1853 * must be a multiple of 256-bits). Compute o_len, i_len in bytes.
1854 * Buffer sizes must be a multiple of 32 bytes; rounding up may be
1855 * required.
63b94509 1856 */
6ba46c7d 1857 o_len = 32 * ((rsa->key_size + 255) / 256);
63b94509
TL
1858 i_len = o_len * 2;
1859
d634baea 1860 sb_count = 0;
6ba46c7d
GH
1861 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
1862 /* sb_count is the number of storage block slots required
1863 * for the modulus.
1864 */
1865 sb_count = o_len / CCP_SB_BYTES;
1866 op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
1867 sb_count);
1868 if (!op.sb_key)
1869 return -EIO;
1870 } else {
1871 /* A version 5 device allows a modulus size that will not fit
1872 * in the LSB, so the command will transfer it from memory.
1873 * Set the sb key to the default, even though it's not used.
1874 */
1875 op.sb_key = cmd_q->sb_key;
1876 }
63b94509 1877
6ba46c7d
GH
1878 /* The RSA exponent must be in little endian format. Reverse its
1879 * byte order.
63b94509
TL
1880 */
1881 ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
1882 if (ret)
956ee21a 1883 goto e_sb;
63b94509 1884
83d650ab 1885 ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
355eba5d
TL
1886 if (ret)
1887 goto e_exp;
6ba46c7d
GH
1888
1889 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
1890 /* Copy the exponent to the local storage block, using
1891 * as many 32-byte blocks as were allocated above. It's
1892 * already little endian, so no further change is required.
1893 */
1894 ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
1895 CCP_PASSTHRU_BYTESWAP_NOOP);
1896 if (ret) {
1897 cmd->engine_error = cmd_q->cmd_error;
1898 goto e_exp;
1899 }
1900 } else {
1901 /* The exponent can be retrieved from memory via DMA. */
1902 op.exp.u.dma.address = exp.dma.address;
1903 op.exp.u.dma.offset = 0;
63b94509
TL
1904 }
1905
1906 /* Concatenate the modulus and the message. Both the modulus and
1907 * the operands must be in little endian format. Since the input
1908 * is in big endian format it must be converted.
1909 */
1910 ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
1911 if (ret)
1912 goto e_exp;
1913
83d650ab 1914 ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
355eba5d
TL
1915 if (ret)
1916 goto e_src;
83d650ab 1917 ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
355eba5d
TL
1918 if (ret)
1919 goto e_src;

        /* Prepare the output area for the operation */
        ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
        if (ret)
                goto e_src;

        op.soc = 1;
        op.src.u.dma.address = src.dma.address;
        op.src.u.dma.offset = 0;
        op.src.u.dma.length = i_len;
        op.dst.u.dma.address = dst.dma.address;
        op.dst.u.dma.offset = 0;
        op.dst.u.dma.length = o_len;

        op.u.rsa.mod_size = rsa->key_size;
        op.u.rsa.input_len = i_len;
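        /* Note the mixed units: mod_size is the key size in bits, while
         * input_len is the concatenated source length in bytes.
         */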

        ret = cmd_q->ccp->vdata->perform->rsa(&op);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_dst;
        }

        ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);

e_dst:
        ccp_dm_free(&dst);

e_src:
        ccp_dm_free(&src);

e_exp:
        ccp_dm_free(&exp);

e_sb:
        if (sb_count)
                cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

        return ret;
}

static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
                                struct ccp_cmd *cmd)
{
        struct ccp_passthru_engine *pt = &cmd->u.passthru;
        struct ccp_dm_workarea mask;
        struct ccp_data src, dst;
        struct ccp_op op;
        bool in_place = false;
        unsigned int i;
        int ret = 0;

        if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
                return -EINVAL;
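        /* The bitwise test above relies on CCP_PASSTHRU_BLOCKSIZE being a
         * power of two: src_len must be a whole number of blocks unless
         * this is the final chunk of the stream.
         */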

        if (!pt->src || !pt->dst)
                return -EINVAL;

        if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
                if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
                        return -EINVAL;
                if (!pt->mask)
                        return -EINVAL;
        }

        BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

        if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
                /* Load the mask */
                op.sb_key = cmd_q->sb_key;

                ret = ccp_init_dm_workarea(&mask, cmd_q,
                                           CCP_PASSTHRU_SB_COUNT *
                                           CCP_SB_BYTES,
                                           DMA_TO_DEVICE);
                if (ret)
                        return ret;

                ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
                if (ret)
                        goto e_mask;
                ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
                                     CCP_PASSTHRU_BYTESWAP_NOOP);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_mask;
                }
        }

        /* Prepare the input and output data workareas. For in-place
         * operations we need to set the DMA direction to BIDIRECTIONAL
         * and copy the src workarea to the dst workarea.
         */
        if (sg_virt(pt->src) == sg_virt(pt->dst))
                in_place = true;

        ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
                            CCP_PASSTHRU_MASKSIZE,
                            in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
        if (ret)
                goto e_mask;

        if (in_place) {
                dst = src;
        } else {
                ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
                                    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
                if (ret)
                        goto e_src;
        }

        /* Send data to the CCP Passthru engine.
         * Because the CCP engine works on a single source and destination
         * DMA address at a time, each entry in the source scatterlist
         * (after the dma_map_sg call) must be less than or equal to the
         * (remaining) length in the destination scatterlist entry, and the
         * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE.
         */
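        /* For example, a source list mapped as three entries of 64, 64 and
         * 32 bytes against a single 160-byte destination entry runs the
         * loop three times: dst.sg_wa.sg_used advances by each source
         * entry's length, and the walk moves to the next destination entry
         * only once the current one is exactly consumed.
         */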
        dst.sg_wa.sg_used = 0;
        for (i = 1; i <= src.sg_wa.dma_count; i++) {
                if (!dst.sg_wa.sg ||
                    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
                        ret = -EINVAL;
                        goto e_dst;
                }

                if (i == src.sg_wa.dma_count) {
                        op.eom = 1;
                        op.soc = 1;
                }

                op.src.type = CCP_MEMTYPE_SYSTEM;
                op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
                op.src.u.dma.offset = 0;
                op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

                op.dst.type = CCP_MEMTYPE_SYSTEM;
                op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
                op.dst.u.dma.offset = dst.sg_wa.sg_used;
                op.dst.u.dma.length = op.src.u.dma.length;

                ret = cmd_q->ccp->vdata->perform->passthru(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_dst;
                }

                dst.sg_wa.sg_used += src.sg_wa.sg->length;
                if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
                        dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
                        dst.sg_wa.sg_used = 0;
                }
                src.sg_wa.sg = sg_next(src.sg_wa.sg);
        }

e_dst:
        if (!in_place)
                ccp_free_data(&dst, cmd_q);

e_src:
        ccp_free_data(&src, cmd_q);

e_mask:
        if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
                ccp_dm_free(&mask);

        return ret;
}

static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
                                      struct ccp_cmd *cmd)
{
        struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
        struct ccp_dm_workarea mask;
        struct ccp_op op;
        int ret;

        if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
                return -EINVAL;

        if (!pt->src_dma || !pt->dst_dma)
                return -EINVAL;

        if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
                if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
                        return -EINVAL;
                if (!pt->mask)
                        return -EINVAL;
        }

        BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

        if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
                /* Load the mask */
                op.sb_key = cmd_q->sb_key;

                mask.length = pt->mask_len;
                mask.dma.address = pt->mask;
                mask.dma.length = pt->mask_len;
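                /* The mask workarea is only aliased onto the caller's
                 * already-mapped DMA buffer; nothing is allocated here, so
                 * there is no matching ccp_dm_free() on this path.
                 */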

                ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
                                     CCP_PASSTHRU_BYTESWAP_NOOP);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        return ret;
                }
        }

        /* Send data to the CCP Passthru engine */
        op.eom = 1;
        op.soc = 1;

        op.src.type = CCP_MEMTYPE_SYSTEM;
        op.src.u.dma.address = pt->src_dma;
        op.src.u.dma.offset = 0;
        op.src.u.dma.length = pt->src_len;

        op.dst.type = CCP_MEMTYPE_SYSTEM;
        op.dst.u.dma.address = pt->dst_dma;
        op.dst.u.dma.offset = 0;
        op.dst.u.dma.length = pt->src_len;

        ret = cmd_q->ccp->vdata->perform->passthru(&op);
        if (ret)
                cmd->engine_error = cmd_q->cmd_error;

        return ret;
}

static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
        struct ccp_ecc_engine *ecc = &cmd->u.ecc;
        struct ccp_dm_workarea src, dst;
        struct ccp_op op;
        int ret;
        u8 *save;

        if (!ecc->u.mm.operand_1 ||
            (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
                return -EINVAL;

        if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
                if (!ecc->u.mm.operand_2 ||
                    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
                        return -EINVAL;

        if (!ecc->u.mm.result ||
            (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
                return -EINVAL;

        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

        /* Concatenate the modulus and the operands. Both the modulus and
         * the operands must be in little endian format. Since the input
         * is in big endian format it must be converted and placed in a
         * fixed length buffer.
         */
        ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
                                   DMA_TO_DEVICE);
        if (ret)
                return ret;

        /* Save the workarea address since it is updated in order to perform
         * the concatenation
         */
        save = src.address;
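        /* The source buffer is filled in CCP_ECC_OPERAND_SIZE strides:
         *   modulus | operand_1 | operand_2
         * with operand_2 omitted for modular inversion (MINV).
         */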

        /* Copy the ECC modulus */
        ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
        if (ret)
                goto e_src;
        src.address += CCP_ECC_OPERAND_SIZE;

        /* Copy the first operand */
        ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
                                      ecc->u.mm.operand_1_len);
        if (ret)
                goto e_src;
        src.address += CCP_ECC_OPERAND_SIZE;

        if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
                /* Copy the second operand */
                ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
                                              ecc->u.mm.operand_2_len);
                if (ret)
                        goto e_src;
                src.address += CCP_ECC_OPERAND_SIZE;
        }

        /* Restore the workarea address */
        src.address = save;

        /* Prepare the output area for the operation */
        ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
                                   DMA_FROM_DEVICE);
        if (ret)
                goto e_src;

        op.soc = 1;
        op.src.u.dma.address = src.dma.address;
        op.src.u.dma.offset = 0;
        op.src.u.dma.length = src.length;
        op.dst.u.dma.address = dst.dma.address;
        op.dst.u.dma.offset = 0;
        op.dst.u.dma.length = dst.length;

        op.u.ecc.function = cmd->u.ecc.function;

        ret = cmd_q->ccp->vdata->perform->ecc(&op);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_dst;
        }

        ecc->ecc_result = le16_to_cpup(
                (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
        if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
                ret = -EIO;
                goto e_dst;
        }

        /* Save the ECC result */
        ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
                                CCP_ECC_MODULUS_BYTES);

e_dst:
        ccp_dm_free(&dst);

e_src:
        ccp_dm_free(&src);

        return ret;
}

static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
        struct ccp_ecc_engine *ecc = &cmd->u.ecc;
        struct ccp_dm_workarea src, dst;
        struct ccp_op op;
        int ret;
        u8 *save;

        if (!ecc->u.pm.point_1.x ||
            (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
            !ecc->u.pm.point_1.y ||
            (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
                return -EINVAL;

        if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
                if (!ecc->u.pm.point_2.x ||
                    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
                    !ecc->u.pm.point_2.y ||
                    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
                        return -EINVAL;
        } else {
                if (!ecc->u.pm.domain_a ||
                    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
                        return -EINVAL;

                if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
                        if (!ecc->u.pm.scalar ||
                            (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
                                return -EINVAL;
        }

        if (!ecc->u.pm.result.x ||
            (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
            !ecc->u.pm.result.y ||
            (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
                return -EINVAL;

        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

        /* Concatenate the modulus and the operands. Both the modulus and
         * the operands must be in little endian format. Since the input
         * is in big endian format it must be converted and placed in a
         * fixed length buffer.
         */
        ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
                                   DMA_TO_DEVICE);
        if (ret)
                return ret;

        /* Save the workarea address since it is updated in order to perform
         * the concatenation
         */
        save = src.address;
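        /* The source buffer is filled in CCP_ECC_OPERAND_SIZE strides:
         *   point add:       modulus | x1 | y1 | z1 (= 1) | x2 | y2 | z2 (= 1)
         *   multiply/double: modulus | x1 | y1 | z1 (= 1) | a | scalar
         * with the scalar present only for point multiplication (PMUL).
         */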

        /* Copy the ECC modulus */
        ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
        if (ret)
                goto e_src;
        src.address += CCP_ECC_OPERAND_SIZE;

        /* Copy the first point X and Y coordinates */
        ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
                                      ecc->u.pm.point_1.x_len);
        if (ret)
                goto e_src;
        src.address += CCP_ECC_OPERAND_SIZE;
        ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
                                      ecc->u.pm.point_1.y_len);
        if (ret)
                goto e_src;
        src.address += CCP_ECC_OPERAND_SIZE;

        /* Set the first point Z coordinate to 1 */
        *src.address = 0x01;
        src.address += CCP_ECC_OPERAND_SIZE;
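        /* The engine appears to operate on projective coordinates, so an
         * affine input point (x, y) is supplied as (x, y, 1). The buffer is
         * little endian, so writing 0x01 to the first byte of the operand
         * slot encodes the value 1.
         */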

        if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
                /* Copy the second point X and Y coordinates */
                ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
                                              ecc->u.pm.point_2.x_len);
                if (ret)
                        goto e_src;
                src.address += CCP_ECC_OPERAND_SIZE;
                ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
                                              ecc->u.pm.point_2.y_len);
                if (ret)
                        goto e_src;
                src.address += CCP_ECC_OPERAND_SIZE;

                /* Set the second point Z coordinate to 1 */
                *src.address = 0x01;
                src.address += CCP_ECC_OPERAND_SIZE;
        } else {
                /* Copy the Domain "a" parameter */
                ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
                                              ecc->u.pm.domain_a_len);
                if (ret)
                        goto e_src;
                src.address += CCP_ECC_OPERAND_SIZE;

                if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
                        /* Copy the scalar value */
                        ret = ccp_reverse_set_dm_area(&src, 0,
                                                      ecc->u.pm.scalar, 0,
                                                      ecc->u.pm.scalar_len);
                        if (ret)
                                goto e_src;
                        src.address += CCP_ECC_OPERAND_SIZE;
                }
        }

        /* Restore the workarea address */
        src.address = save;

        /* Prepare the output area for the operation */
        ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
                                   DMA_FROM_DEVICE);
        if (ret)
                goto e_src;

        op.soc = 1;
        op.src.u.dma.address = src.dma.address;
        op.src.u.dma.offset = 0;
        op.src.u.dma.length = src.length;
        op.dst.u.dma.address = dst.dma.address;
        op.dst.u.dma.offset = 0;
        op.dst.u.dma.length = dst.length;

        op.u.ecc.function = cmd->u.ecc.function;

        ret = cmd_q->ccp->vdata->perform->ecc(&op);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_dst;
        }

        ecc->ecc_result = le16_to_cpup(
                (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
        if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
                ret = -EIO;
                goto e_dst;
        }

        /* Save the workarea address since it is updated as we walk through
         * to copy the point math result
         */
        save = dst.address;

        /* Save the ECC result X and Y coordinates */
        ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
                                CCP_ECC_MODULUS_BYTES);
        dst.address += CCP_ECC_OUTPUT_SIZE;
        ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
                                CCP_ECC_MODULUS_BYTES);
        dst.address += CCP_ECC_OUTPUT_SIZE;

        /* Restore the workarea address */
        dst.address = save;

e_dst:
        ccp_dm_free(&dst);

e_src:
        ccp_dm_free(&src);

        return ret;
}

static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
        struct ccp_ecc_engine *ecc = &cmd->u.ecc;

        ecc->ecc_result = 0;

        if (!ecc->mod ||
            (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
                return -EINVAL;

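        /* Route modular-math functions (MMUL/MADD/MINV) and point-math
         * functions (PADD/PMUL/PDBL) to their respective handlers.
         */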
        switch (ecc->function) {
        case CCP_ECC_FUNCTION_MMUL_384BIT:
        case CCP_ECC_FUNCTION_MADD_384BIT:
        case CCP_ECC_FUNCTION_MINV_384BIT:
                return ccp_run_ecc_mm_cmd(cmd_q, cmd);

        case CCP_ECC_FUNCTION_PADD_384BIT:
        case CCP_ECC_FUNCTION_PMUL_384BIT:
        case CCP_ECC_FUNCTION_PDBL_384BIT:
                return ccp_run_ecc_pm_cmd(cmd_q, cmd);

        default:
                return -EINVAL;
        }
}

int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
        int ret;

        cmd->engine_error = 0;
        cmd_q->cmd_error = 0;
        cmd_q->int_rcvd = 0;
        cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

        switch (cmd->engine) {
        case CCP_ENGINE_AES:
                ret = ccp_run_aes_cmd(cmd_q, cmd);
                break;
        case CCP_ENGINE_XTS_AES_128:
                ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
                break;
        case CCP_ENGINE_DES3:
                ret = ccp_run_des3_cmd(cmd_q, cmd);
                break;
        case CCP_ENGINE_SHA:
                ret = ccp_run_sha_cmd(cmd_q, cmd);
                break;
        case CCP_ENGINE_RSA:
                ret = ccp_run_rsa_cmd(cmd_q, cmd);
                break;
        case CCP_ENGINE_PASSTHRU:
                if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
                        ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
                else
                        ret = ccp_run_passthru_cmd(cmd_q, cmd);
                break;
        case CCP_ENGINE_ECC:
                ret = ccp_run_ecc_cmd(cmd_q, cmd);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}