// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64
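
/*
 * Note: the AES and SHA bounce buffers below are DCP_BUF_SZ (one page)
 * each, so larger requests are processed in multiple passes.
 * DCP_ALIGNMENT is the 64-byte alignment applied to the coherent work
 * block; see the PTR_ALIGN() fixup in mxs_dcp_probe().
 */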

/*
 * Null hashes to align with hardware behavior on i.MX6SL and i.MX6ULL;
 * these are flipped for consistency with hardware output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO,
};

struct dcp_async_ctx {
	/* Common context */
	struct mutex			mutex;
	enum dcp_chan			chan;
	uint32_t			fill;

	/* SHA Hash-specific context */
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_skcipher		*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	int	enc, ecb;
	struct skcipher_request fallback_req;	// keep at the end
};

struct dcp_sha_req_ctx {
	unsigned int	init;
	unsigned int	fini;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx	req_ctx;
	struct dcp_async_ctx	async_ctx;
};

/*
 * There can only ever be one instance of the MXS DCP due to the
 * design of the Linux Crypto API.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))
#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
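
/*
 * Each channel owns a CMDPTR/SEMA/STAT register set at a 0x40-byte
 * stride starting at 0x100, which is what the MXS_DCP_CH_N_*() macros
 * above compute.
 */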

static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));

	/* Unmap unconditionally so the error paths do not leak the mapping. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	return 0;
}
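
/*
 * Summary of the kick-off protocol above: clear CH_N_STAT, point
 * CH_N_CMDPTR at a single coherent descriptor, then increment CH_N_SEMA
 * by one to hand the descriptor to the engine. mxs_dcp_irq() completes
 * the per-channel completion this function waits on.
 */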

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}
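
/*
 * For AES, the descriptor payload points at the coherent aes_key area
 * holding the 128-bit key followed by the 128-bit CBC IV (zeroed for
 * ECB); MXS_DCP_CONTROL0_PAYLOAD_KEY makes the engine fetch the key
 * material from the payload rather than an internal key slot.
 */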

static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;
	int ret = 0;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	unsigned int i, len, clen, rem = 0, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				last_out_len = actx->fill;
				while (dst && actx->fill) {
					dst_buf = sg_virt(dst);

					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						dst_off = 0;
					}
				}
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}
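
/*
 * The function above is deliberately bounce-buffered: source SGs are
 * gathered into aes_in_buf in up-to-DCP_BUF_SZ chunks, each chunk is run
 * through the engine, and aes_out_buf is scattered back into the
 * destination SG list, with dst_off carrying the write position across
 * chunk boundaries.
 */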

static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}
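
/*
 * One kthread per channel drains its crypto_queue: backlogged requests
 * are notified with -EINPROGRESS, the dequeued request is run
 * synchronously, and the thread sleeps in TASK_INTERRUPTIBLE while the
 * queue is empty.
 */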

static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		ret = crypto_skcipher_decrypt(&rctx->fallback_req);

	return ret;
}

static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES 128 is supported by the hardware, store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(actx->fallback, key, len);
}
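
/*
 * Only AES-128 keys are kept for the engine; any other length is just
 * recorded in key_len so mxs_dcp_aes_enqueue() can route the request to
 * the software fallback tfm configured here.
 */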

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
					 crypto_skcipher_reqsize(blk));
	return 0;
}

static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}
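
/*
 * The payload area (sha_out_buf, DCP_SHA_PAY_SZ bytes) receives the
 * digest only when MXS_DCP_CONTROL0_HASH_TERM is set on the final
 * descriptor; intermediate blocks keep the running state inside the
 * engine, which is why rctx->init and rctx->fini bracket a session.
 */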

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* For some reason the result is flipped */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}
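
/*
 * The engine writes the digest out in reverse byte order, hence the
 * flipping loop above and the pre-flipped null-hash constants at the top
 * of this file.
 */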

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	},
};
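
/*
 * A minimal usage sketch (an illustration, not part of this driver):
 * assuming the DCP probed successfully and its priority-400 entries win
 * algorithm selection, a kernel user allocating the generic name binds
 * to the implementation above, e.g.:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 * Requests on that tfm are queued by mxs_dcp_aes_enqueue() and completed
 * asynchronously from the DCP_CHAN_CRYPTO kthread.
 */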

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SOCs. */
	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk)) {
		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
			return PTR_ERR(sdcp->dcp_clk);
		sdcp->dcp_clk = NULL;
	}
	ret = clk_prepare_enable(sdcp->dcp_clk);
	if (ret)
		return ret;

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		goto err_disable_unprepare_clk;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_disable_unprepare_clk;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_disable_unprepare_clk:
	clk_disable_unprepare(sdcp->dcp_clk);

	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	clk_disable_unprepare(sdcp->dcp_clk);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");