// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};
static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};
static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};
static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
};
static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
};
#define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)
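/* A job ID tags the series of pass-through and engine operations issued on
 * behalf of one command so the device can track them as a unit.  Only a
 * version 3 device needs an ID generated here; on later devices the field
 * is simply left at zero.
 */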
static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}
static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}
static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == wa->sg->length) {
		wa->sg = sg_next(wa->sg);
		wa->sg_used = 0;
	}
}
static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->dma.address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}
static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
					     &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

		memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (dma_mapping_error(wa->dev, wa->dma.address))
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}
static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			   struct scatterlist *sg, unsigned int sg_offset,
			   unsigned int len)
{
	WARN_ON(!wa->address);

	if (len > (wa->length - wa_offset))
		return -EINVAL;

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
	return 0;
}
static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}
static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   unsigned int wa_offset,
				   struct scatterlist *sg,
				   unsigned int sg_offset,
				   unsigned int len)
{
	u8 *p, *q;
	int rc;

	rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
	if (rc)
		return rc;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}
	return 0;
}
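/* Example of the reversal above: a big-endian operand {0x12, 0x34, 0x56}
 * copied in with ccp_reverse_set_dm_area() lands in the workarea as
 * {0x56, 0x34, 0x12}, the little-endian layout the engine expects.  The
 * XOR swap reverses the buffer in place without a temporary variable.
 */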
static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    unsigned int wa_offset,
				    struct scatterlist *sg,
				    unsigned int sg_offset,
				    unsigned int len)
{
	u8 *p, *q;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}

	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
}
static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}
static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}
static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 * nbytes will always be <= UINT_MAX because dm_wa->length is
	 * an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}
static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}
static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}
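/* ccp_fill_queue_buf() copies source data from the scatterlist into the
 * queue's bounce buffer (clearing it first), while ccp_empty_queue_buf()
 * copies processed data from the bounce buffer back out to the destination
 * scatterlist.
 */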
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * the destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}
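/* In short: whenever the current scatterlist element cannot supply (or
 * receive) a full block, ccp_prepare_data() falls back to the blocksize
 * bounce buffer (dm_wa) and sets the stop-on-completion flag so the
 * buffered result can be copied back before the next operation is issued.
 */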
static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}
static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}
static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}
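/* The helpers above stage key and context material between system memory
 * and the device's local storage block (LSB) slots using the pass-through
 * engine; the byte_swap argument performs the big-/little-endian conversion
 * in transit.
 */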
static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					      aes->cmac_key_len);
			if (ret)
				goto e_src;
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
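/* AES GCM is implemented as four device operations: a GHASH over the AAD,
 * a GCTR pass over the plaintext or ciphertext, a rewrite of the IV portion
 * of the context, and a final GHASH over the concatenated AAD and message
 * bit lengths, which produces the authentication tag.
 */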
static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx, final_wa, tag;
	struct ccp_data src, dst;
	struct ccp_data aad;
	struct ccp_op op;

	unsigned long long *final;
	unsigned int dm_offset;
	unsigned int ilen;
	bool in_place = true; /* Default value */
	int ret;

	struct scatterlist *p_inp, sg_inp[2];
	struct scatterlist *p_tag, sg_tag[2];
	struct scatterlist *p_outp, sg_outp[2];
	struct scatterlist *p_aad;

	if (!aes->iv)
		return -EINVAL;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (!aes->key) /* Gotta have a key SGL */
		return -EINVAL;

	/* First, decompose the source buffer into AAD & PT,
	 * and the destination buffer into AAD, CT & tag, or
	 * the input into CT & tag.
	 * It is expected that the input and output SGs will
	 * be valid, even if the AAD and input lengths are 0.
	 */
	p_aad = aes->src;
	p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
	p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		ilen = aes->src_len;
		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
	} else {
		/* Input length for decryption includes tag */
		ilen = aes->src_len - AES_BLOCK_SIZE;
		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
	}

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;

	/* Copy the key to the LSB */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* Copy the context (IV) to the LSB.
	 * There is an assumption here that the IV is 96 bits in length, plus
	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (aes->aad_len > 0) {
		/* Step 1: Run a GHASH over the Additional Authenticated Data */
		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
				    AES_BLOCK_SIZE,
				    DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		op.u.aes.mode = CCP_AES_MODE_GHASH;
		op.u.aes.action = CCP_AES_GHASHAAD;

		while (aad.sg_wa.bytes_left) {
			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_aad;
			}

			ccp_process_data(&aad, NULL, &op);
		}
	}

	op.u.aes.mode = CCP_AES_MODE_GCTR;
	op.u.aes.action = aes->action;

	/* Step 2: Run a GCTR over the plaintext */
	in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;

	ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL
				     : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			unsigned int nbytes = aes->src_len
					      % AES_BLOCK_SIZE;

			if (nbytes) {
				op.eom = 1;
				op.u.aes.size = (nbytes * 8) - 1;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Step 3: Update the IV portion of the context with the original IV */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_dst;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* Step 4: Concatenate the lengths of the AAD and source, and
	 * hash that 16 byte buffer.
	 */
	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_dst;
	final = (unsigned long long *) final_wa.address;
	final[0] = cpu_to_be64(aes->aad_len * 8);
	final[1] = cpu_to_be64(ilen * 8);

	op.u.aes.mode = CCP_AES_MODE_GHASH;
	op.u.aes.action = CCP_AES_GHASHFINAL;
	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = final_wa.dma.address;
	op.src.u.dma.length = AES_BLOCK_SIZE;
	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = final_wa.dma.address;
	op.dst.u.dma.length = AES_BLOCK_SIZE;
	op.eom = 1;
	op.u.aes.size = 0;
	ret = cmd_q->ccp->vdata->perform->aes(&op);
	if (ret)
		goto e_dst;

	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		/* Put the ciphered tag after the ciphertext. */
		ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
	} else {
		/* Does this ciphered tag match the input? */
		ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_tag;
		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
		if (ret)
			goto e_tag;

		ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
		ccp_dm_free(&tag);
	}

e_tag:
	ccp_dm_free(&final_wa);

e_dst:
	if (aes->src_len && !in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	if (aes->src_len)
		ccp_free_data(&src, cmd_q);

e_aad:
	if (aes->aad_len)
		ccp_free_data(&aad, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (aes->mode == CCP_AES_MODE_CMAC)
		return ccp_run_aes_cmac_cmd(cmd_q, cmd);

	if (aes->mode == CCP_AES_MODE_GCM)
		return ccp_run_aes_gcm_cmd(cmd_q, cmd);

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC) ||
	     (aes->mode == CCP_AES_MODE_CFB)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		if (ret)
			goto e_ctx;
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	unsigned int sb_count;
	enum ccp_aes_type aestype;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len == AES_KEYSIZE_128)
		aestype = CCP_AES_TYPE_128;
	else if (xts->key_len == AES_KEYSIZE_256)
		aestype = CCP_AES_TYPE_256;
	else
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.type = aestype;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* A version 3 device only supports 128-bit keys, which fits into a
	 * single SB entry. A version 5 device uses a 512-bit vector, so two
	 * SB entries.
	 */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
		sb_count = CCP_XTS_AES_KEY_SB_COUNT;
	else
		sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   sb_count * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
		/* All supported key sizes must be in little endian format.
		 * Use the 256-bit byte swap passthru option to convert from
		 * big endian to little endian.
		 */
		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
		ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	} else {
		/* Version 5 CCPs use a 512-bit space for the key: each portion
		 * occupies 256 bits, or one entire slot, and is zero-padded.
		 */
		unsigned int pad;

		dm_offset = CCP_SB_BYTES;
		pad = dm_offset - xts->key_len;
		ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
				      xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	}
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
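/* 3DES follows the same flow as AES: key and IV are staged through the LSB
 * in little-endian form, data is processed block by block, and the IV is
 * read back afterwards for chaining modes.  The key halves are copied
 * individually because the engine expects the triplet in the reverse order
 * of how it is supplied.
 */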
static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_des3_engine *des3 = &cmd->u.des3;

	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	unsigned int len_singlekey;
	bool in_place = false;
	int ret;

	/* Error checks */
	if (!cmd_q->ccp->vdata->perform->des3)
		return -EINVAL;

	if (des3->key_len != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	if (((des3->mode == CCP_DES3_MODE_ECB) ||
	     (des3->mode == CCP_DES3_MODE_CBC)) &&
	    (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!des3->key || !des3->src || !des3->dst)
		return -EINVAL;

	if (des3->mode != CCP_DES3_MODE_ECB) {
		if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
			return -EINVAL;

		if (!des3->iv)
			return -EINVAL;
	}

	ret = -EIO;
	/* Zero out all the fields of the command desc */
	memset(&op, 0, sizeof(op));

	/* Set up the Function field */
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;

	op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
	op.u.des3.type = des3->type;
	op.u.des3.mode = des3->mode;
	op.u.des3.action = des3->action;

	/*
	 * All supported key sizes fit in a single (32-byte) KSB entry and
	 * (like AES) must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/*
	 * The contents of the key triplet are in the reverse order of what
	 * is required by the engine. Copy the 3 pieces individually to put
	 * them where they belong.
	 */
	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */

	len_singlekey = des3->key_len / 3;
	ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
			      des3->key, 0, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
			      des3->key, len_singlekey, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset,
			      des3->key, 2 * len_singlekey, len_singlekey);
	if (ret)
		goto e_key;

	/* Copy the key to the SB */
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/*
	 * The DES3 context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	if (des3->mode != CCP_DES3_MODE_ECB) {
		u32 load_mode;

		op.sb_ctx = cmd_q->sb_ctx;

		ret = ccp_init_dm_workarea(&ctx, cmd_q,
					   CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_key;

		/* Load the context into the LSB */
		dm_offset = CCP_SB_BYTES - des3->iv_len;
		ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
				      des3->iv_len);
		if (ret)
			goto e_ctx;

		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
			load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
		else
			load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     load_mode);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}

	/*
	 * Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(des3->src) == sg_virt(des3->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
			    DES3_EDE_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
				    DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP DES3 engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the context in ECB mode
			 * we have to wait for the operation to complete
			 * on the last piece of data
			 */
			op.soc = 0;
		}

		ret = cmd_q->ccp->vdata->perform->des3(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (des3->mode != CCP_DES3_MODE_ECB) {
		/* Retrieve the context and make BE */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
			dm_offset = CCP_SB_BYTES - des3->iv_len;
		else
			dm_offset = 0;
		ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
				DES3_EDE_BLOCK_SIZE);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	if (des3->mode != CCP_DES3_MODE_ECB)
		ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
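/* SHA: the engine keeps its running digest in the LSB, so each pass either
 * loads the well-known initial values (first pass) or restores the stashed
 * context, hashes full blocks, and then stashes or finalizes the context.
 * HMAC finalization recurses once more over opad || digest.
 */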
static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;
	unsigned int digest_size;
	int sb_count;
	const void *init;
	u64 block_size;
	int ctx_size;
	int ret;

	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_384:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA384_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA384_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA512_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation. However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_384:
		digest_size = SHA384_DIGEST_SIZE;
		init = (void *) ccp_sha384_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ioffset = 0;
		ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		digest_size = SHA512_DIGEST_SIZE;
		init = (void *) ccp_sha512_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ooffset = ioffset = 0;
		break;
	default:
		ret = -EINVAL;
		goto e_data;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	/* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
	 * SHA384/512 require 2 adjacent SB slots, with the right half in the
	 * first slot, and the left half in the second. Each portion must then
	 * be in little endian format: use the 256-bit byte swap option.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;
	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(ctx.address + ctx_size / 2, init,
			       ctx_size / 2);
			memcpy(ctx.address, init + ctx_size / 2,
			       ctx_size / 2);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				      sb_count * CCP_SB_BYTES);
		if (ret)
			goto e_ctx;
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping to BE
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			ccp_get_dm_area(&ctx, 0,
					sha->ctx, LSB_ITEM_SIZE - ooffset,
					LSB_ITEM_SIZE);
			ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
					sha->ctx, 0,
					LSB_ITEM_SIZE - ooffset);
			break;
		default:
			ret = -EINVAL;
			goto e_data;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(hmac_buf + block_size,
			       ctx.address + LSB_ITEM_SIZE + ooffset,
			       LSB_ITEM_SIZE - ooffset);
			memcpy(hmac_buf + block_size +
			       (LSB_ITEM_SIZE - ooffset),
			       ctx.address,
			       LSB_ITEM_SIZE);
			break;
		default:
			kfree(hmac_buf);
			ret = -EINVAL;
			goto e_data;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}
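/* RSA: the device computes src^exp mod m.  The modulus and the message are
 * concatenated into a single little-endian input buffer; o_len is the key
 * size rounded up to a 256-bit multiple and i_len is twice that, which is
 * why the operand buffers below are sized the way they are.
 */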
static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src, dst;
	struct ccp_op op;
	unsigned int sb_count, i_len, o_len;
	int ret;

	/* Check against the maximum allowable size, in bits */
	if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated.  Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256-bits).  Compute o_len, i_len in bytes.
	 * Buffer sizes must be a multiple of 32 bytes; rounding up may be
	 * required.
	 */
	o_len = 32 * ((rsa->key_size + 255) / 256);
	i_len = o_len * 2;

	sb_count = 0;
	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
		/* sb_count is the number of storage block slots required
		 * for the modulus.
		 */
		sb_count = o_len / CCP_SB_BYTES;
		op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
								sb_count);
		if (!op.sb_key)
			return -EIO;
	} else {
		/* A version 5 device allows a modulus size that will not fit
		 * in the LSB, so the command will transfer it from memory.
		 * Set the sb key to the default, even though it's not used.
		 */
		op.sb_key = cmd_q->sb_key;
	}

	/* The RSA exponent must be in little endian format. Reverse its
	 * byte order.
	 */
	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
	if (ret)
		goto e_sb;

	ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
	if (ret)
		goto e_exp;

	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
		/* Copy the exponent to the local storage block, using
		 * as many 32-byte blocks as were allocated above. It's
		 * already little endian, so no further change is required.
		 */
		ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_exp;
		}
	} else {
		/* The exponent can be retrieved from memory via DMA. */
		op.exp.u.dma.address = exp.dma.address;
		op.exp.u.dma.offset = 0;
	}

	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format.  Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
	if (ret)
		goto e_src;
	ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
	if (ret)
		goto e_src;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

	ret = cmd_q->ccp->vdata->perform->rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_sb:
	if (sb_count)
		cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

	return ret;
}
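/* The pass-through engine is a raw copy/transform path: it DMAs blocks from
 * source to destination, optionally applying a bitwise mask loaded into the
 * LSB.  The same mechanism is what ccp_copy_to_from_sb() uses above for key
 * and context staging.
 */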
static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret = 0;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_SB_COUNT *
					   CCP_SB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		if (ret)
			goto e_mask;
		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine
	 *   Because the CCP engine works on a single source and destination
	 *   dma address at a time, each entry in the source scatterlist
	 *   (after the dma_map_sg call) must be less than or equal to the
	 *   (remaining) length in the destination scatterlist entry and the
	 *   length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
	 */
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += src.sg_wa.sg->length;
		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}
static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
				      struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}
static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format.  Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
				      ecc->u.mm.operand_1_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
					      ecc->u.mm.operand_2_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
				CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	}

	if (!ecc->u.pm.domain_a ||
	    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
		if (!ecc->u.pm.scalar ||
		    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format.  Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
				      ecc->u.pm.point_1.x_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
				      ecc->u.pm.point_1.y_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
					      ecc->u.pm.point_2.x_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
					      ecc->u.pm.point_2.y_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
					      ecc->u.pm.domain_a_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, 0,
						      ecc->u.pm.scalar, 0,
						      ecc->u.pm.scalar_len);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
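/* Both ECC paths build a fixed-layout source buffer of CCP_ECC_OPERAND_SIZE
 * slots (modulus first, then operands or coordinates, each byte-reversed to
 * little endian) and then check CCP_ECC_RESULT_SUCCESS in the status word
 * returned at CCP_ECC_RESULT_OFFSET of the output buffer.
 */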
static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		ret = ccp_run_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_DES3:
		ret = ccp_run_des3_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}