/*
 * Support for DCP cryptographic accelerator.
 *
 * Author: Tobias Rauter <tobias.rauter@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on tegra-aes.c, dcp.c (from freescale SDK) and sahara.c
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/miscdevice.h>

#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
/* IOCTL for DCP OTP Key AES - taken from Freescale's SDK */
#define DBS_IOCTL_BASE	'd'
#define DBS_ENC		_IOW(DBS_IOCTL_BASE, 0x00, uint8_t[16])
#define DBS_DEC		_IOW(DBS_IOCTL_BASE, 0x01, uint8_t[16])
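
/*
 * A minimal sketch (not from the original sources) of how userspace might
 * drive these ioctls through the "dcpboot" misc device registered below;
 * error handling is elided:
 *
 *	uint8_t block[16];	// one AES block, transformed in place
 *	int fd = open("/dev/dcpboot", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, DBS_ENC, block) == 0)
 *		;		// block now holds the OTP-key ciphertext
 */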
/* DCP channel used for AES */
#define USED_CHANNEL	1
/* Ring Buffers' maximum size */
#define DCP_MAX_PKG	20
/* Control Register */
#define DCP_REG_CTRL			0x000
#define DCP_CTRL_SFRST			(1<<31)
#define DCP_CTRL_CLKGATE		(1<<30)
#define DCP_CTRL_CRYPTO_PRESENT		(1<<29)
#define DCP_CTRL_SHA_PRESENT		(1<<28)
#define DCP_CTRL_GATHER_RES_WRITE	(1<<23)
#define DCP_CTRL_ENABLE_CONTEXT_CACHE	(1<<22)
#define DCP_CTRL_ENABLE_CONTEXT_SWITCH	(1<<21)
#define DCP_CTRL_CH_IRQ_E_0		0x01
#define DCP_CTRL_CH_IRQ_E_1		0x02
#define DCP_CTRL_CH_IRQ_E_2		0x04
#define DCP_CTRL_CH_IRQ_E_3		0x08
/* Status Register */
#define DCP_REG_STAT			0x010
#define DCP_STAT_OTP_KEY_READY		(1<<28)
#define DCP_STAT_CUR_CHANNEL(stat)	((stat>>24)&0x0F)
#define DCP_STAT_READY_CHANNEL(stat)	((stat>>16)&0x0F)
#define DCP_STAT_IRQ(stat)		(stat&0x0F)
#define DCP_STAT_CHAN_0			(0x01)
#define DCP_STAT_CHAN_1			(0x02)
#define DCP_STAT_CHAN_2			(0x04)
#define DCP_STAT_CHAN_3			(0x08)
/* Channel Control Register */
#define DCP_REG_CHAN_CTRL		0x020
#define DCP_CHAN_CTRL_CH0_IRQ_MERGED	(1<<16)
#define DCP_CHAN_CTRL_HIGH_PRIO_0	(0x0100)
#define DCP_CHAN_CTRL_HIGH_PRIO_1	(0x0200)
#define DCP_CHAN_CTRL_HIGH_PRIO_2	(0x0400)
#define DCP_CHAN_CTRL_HIGH_PRIO_3	(0x0800)
#define DCP_CHAN_CTRL_ENABLE_0		(0x01)
#define DCP_CHAN_CTRL_ENABLE_1		(0x02)
#define DCP_CHAN_CTRL_ENABLE_2		(0x04)
#define DCP_CHAN_CTRL_ENABLE_3		(0x08)
/*
 * The DCP has 4 channels. Each of these channels
 * has 4 registers (command pointer, semaphore, status and options).
 * The address of register REG of channel CHAN is obtained by
 * dcp_chan_reg(REG, CHAN)
 */
#define DCP_REG_CHAN_PTR	0x00000100
#define DCP_REG_CHAN_SEMA	0x00000110
#define DCP_REG_CHAN_STAT	0x00000120
#define DCP_REG_CHAN_OPT	0x00000130
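
/*
 * Example: the semaphore register of the AES channel (USED_CHANNEL = 1)
 * is at DCP_REG_CHAN_SEMA + 1 * 0x40 = 0x150; see dcp_chan_reg() below.
 */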
#define DCP_CHAN_STAT_NEXT_CHAIN_IS_0	0x010000
#define DCP_CHAN_STAT_NO_CHAIN		0x020000
#define DCP_CHAN_STAT_CONTEXT_ERROR	0x030000
#define DCP_CHAN_STAT_PAYLOAD_ERROR	0x040000
#define DCP_CHAN_STAT_INVALID_MODE	0x050000
#define DCP_CHAN_STAT_PAGEFAULT		0x40
#define DCP_CHAN_STAT_DST		0x20
#define DCP_CHAN_STAT_SRC		0x10
#define DCP_CHAN_STAT_PACKET		0x08
#define DCP_CHAN_STAT_SETUP		0x04
#define DCP_CHAN_STAT_MISMATCH		0x02
/* hw packet control */
#define DCP_PKT_PAYLOAD_KEY	(1<<11)
#define DCP_PKT_OTP_KEY		(1<<10)
#define DCP_PKT_CIPHER_INIT	(1<<9)
#define DCP_PKG_CIPHER_ENCRYPT	(1<<8)
#define DCP_PKT_CIPHER_ENABLE	(1<<5)
#define DCP_PKT_DECR_SEM	(1<<1)
#define DCP_PKT_CHAIN		(1<<2)
#define DCP_PKT_IRQ		1

#define DCP_PKT_MODE_CBC	(1<<4)
#define DCP_PKT_KEYSELECT_OTP	(0xFF<<8)
/* cipher flags */
#define DCP_ENC		0x0001
#define DCP_DEC		0x0002
#define DCP_ECB		0x0004
#define DCP_CBC		0x0008
#define DCP_CBC_INIT	0x0010
#define DCP_NEW_KEY	0x0040
#define DCP_OTP_KEY	0x0080
#define DCP_AES		0x1000
#define DCP_FLAG_BUSY		0x01
#define DCP_FLAG_PRODUCING	0x02
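
/*
 * DCP_FLAG_BUSY serializes work on the device: it is taken with
 * test_and_set_bit() when a request is submitted and cleared when the
 * queue drains. DCP_FLAG_PRODUCING keeps dcp_op_proceed() from being
 * re-entered while packets are still being pushed into the ring.
 */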
struct dcp_dev_req_ctx {
	int mode;
};

struct dcp_op {
	unsigned int flags;
	u8 key[AES_KEYSIZE_128];
	int keylen;

	struct ablkcipher_request *req;
	struct crypto_ablkcipher *fallback;

	uint32_t stat;
	uint32_t pkt1, pkt2;
	struct ablkcipher_walk walk;
};
struct dcp_dev {
	struct device *dev;
	void __iomem *dcp_regs_base;

	int dcp_vmi_irq;
	int dcp_irq;

	spinlock_t queue_lock;
	struct crypto_queue queue;

	uint32_t pkt_produced;
	uint32_t pkt_consumed;

	struct dcp_hw_packet *hw_pkg[DCP_MAX_PKG];
	dma_addr_t hw_phys_pkg;

	/* [KEY][IV] Both with 16 Bytes */
	u8 *payload_base;
	dma_addr_t payload_base_dma;

	struct tasklet_struct done_task;
	struct tasklet_struct queue_task;
	struct timer_list watchdog;

	unsigned long flags;

	struct dcp_op *ctx;

	struct miscdevice dcp_bootstream_misc;
};
struct dcp_hw_packet {
	uint32_t next;
	uint32_t pkt1;
	uint32_t pkt2;
	uint32_t src;
	uint32_t dst;
	uint32_t size;
	uint32_t payload;
	uint32_t stat;
};
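
/*
 * These eight 32-bit words mirror the DCP's in-memory work-packet layout.
 * The engine follows the 'next' field from packet to packet, so linking
 * the last descriptor back to the first (as dcp_probe() does) turns the
 * DCP_MAX_PKG array into a ring buffer.
 */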
static struct dcp_dev *global_dev;
static inline u32 dcp_chan_reg(u32 reg, int chan)
{
	return reg + (chan) * 0x40;
}
static inline void dcp_write(struct dcp_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->dcp_regs_base + reg);
}
static inline void dcp_set(struct dcp_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->dcp_regs_base + (reg | 0x04));
}
static inline void dcp_clear(struct dcp_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->dcp_regs_base + (reg | 0x08));
}
static inline void dcp_toggle(struct dcp_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->dcp_regs_base + (reg | 0x0C));
}
static inline unsigned int dcp_read(struct dcp_dev *dev, u32 reg)
{
	return readl(dev->dcp_regs_base + reg);
}
static void dcp_dma_unmap(struct dcp_dev *dev, struct dcp_hw_packet *pkt)
{
	dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
	dma_unmap_page(dev->dev, pkt->dst, pkt->size, DMA_FROM_DEVICE);
	dev_dbg(dev->dev, "unmap packet %x", (unsigned int) pkt);
}
static int dcp_dma_map(struct dcp_dev *dev,
	struct ablkcipher_walk *walk, struct dcp_hw_packet *pkt)
{
	dev_dbg(dev->dev, "map packet %x", (unsigned int) pkt);
	/* align to length = 16 */
	pkt->size = walk->nbytes - (walk->nbytes % 16);

	pkt->src = dma_map_page(dev->dev, walk->src.page, walk->src.offset,
		pkt->size, DMA_TO_DEVICE);

	if (pkt->src == 0) {
		dev_err(dev->dev, "Unable to map src");
		return -ENOMEM;
	}

	pkt->dst = dma_map_page(dev->dev, walk->dst.page, walk->dst.offset,
		pkt->size, DMA_FROM_DEVICE);

	if (pkt->dst == 0) {
		dev_err(dev->dev, "Unable to map dst");
		dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	return 0;
}
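
/*
 * The mapped size is truncated to a multiple of 16 bytes (the AES block
 * size); dcp_op_proceed() hands any remainder back through
 * ablkcipher_walk_done() so it is picked up again by the next packet.
 */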
static void dcp_op_one(struct dcp_dev *dev, struct dcp_hw_packet *pkt,
			uint8_t last)
{
	struct dcp_op *ctx = dev->ctx;
	pkt->pkt1 = ctx->pkt1;
	pkt->pkt2 = ctx->pkt2;

	pkt->payload = (u32) dev->payload_base_dma;
	pkt->stat = 0;

	if (ctx->flags & DCP_CBC_INIT) {
		pkt->pkt1 |= DCP_PKT_CIPHER_INIT;
		ctx->flags &= ~DCP_CBC_INIT;
	}

	mod_timer(&dev->watchdog, jiffies + msecs_to_jiffies(500));
	pkt->pkt1 |= DCP_PKT_IRQ;
	if (!last)
		pkt->pkt1 |= DCP_PKT_CHAIN;

	dev->pkt_produced++;

	dcp_write(dev, 1,
		dcp_chan_reg(DCP_REG_CHAN_SEMA, USED_CHANNEL));
}
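
/*
 * Writing 1 to the channel semaphore is what actually kicks the hardware:
 * the DCP processes one work packet per semaphore increment, starting from
 * the command pointer programmed in dcp_op_start(), and raises the IRQ
 * requested via DCP_PKT_IRQ when the packet completes.
 */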
static void dcp_op_proceed(struct dcp_dev *dev)
{
	struct dcp_op *ctx = dev->ctx;
	struct dcp_hw_packet *pkt;

	while (ctx->walk.nbytes) {
		int err = 0;

		pkt = dev->hw_pkg[dev->pkt_produced % DCP_MAX_PKG];
		err = dcp_dma_map(dev, &ctx->walk, pkt);
		if (err) {
			dev->ctx->stat |= err;
			/* start timer to wait for already set up calls */
			mod_timer(&dev->watchdog,
				jiffies + msecs_to_jiffies(500));
			break;
		}

		err = ctx->walk.nbytes - pkt->size;
		ablkcipher_walk_done(dev->ctx->req, &dev->ctx->walk, err);

		dcp_op_one(dev, pkt, ctx->walk.nbytes == 0);
		/* we have to wait if no space is left in buffer */
		if (dev->pkt_produced - dev->pkt_consumed == DCP_MAX_PKG)
			break;
	}
	clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
}
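
/*
 * Producer-side backpressure: when the ring is full the loop above breaks
 * out, and dcp_done_task() restarts production (guarded by
 * DCP_FLAG_PRODUCING) once the hardware has consumed packets again.
 */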
static void dcp_op_start(struct dcp_dev *dev, uint8_t use_walk)
{
	struct dcp_op *ctx = dev->ctx;

	if (ctx->flags & DCP_NEW_KEY) {
		memcpy(dev->payload_base, ctx->key, ctx->keylen);
		ctx->flags &= ~DCP_NEW_KEY;
	}

	ctx->pkt1 = 0;
	ctx->pkt1 |= DCP_PKT_CIPHER_ENABLE;
	ctx->pkt1 |= DCP_PKT_DECR_SEM;

	if (ctx->flags & DCP_OTP_KEY)
		ctx->pkt1 |= DCP_PKT_OTP_KEY;
	else
		ctx->pkt1 |= DCP_PKT_PAYLOAD_KEY;

	if (ctx->flags & DCP_ENC)
		ctx->pkt1 |= DCP_PKG_CIPHER_ENCRYPT;

	ctx->pkt2 = 0;
	if (ctx->flags & DCP_CBC)
		ctx->pkt2 |= DCP_PKT_MODE_CBC;

	dev->pkt_produced = 0;
	dev->pkt_consumed = 0;

	ctx->stat = 0;
	dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
	dcp_write(dev, (u32) dev->hw_phys_pkg,
		dcp_chan_reg(DCP_REG_CHAN_PTR, USED_CHANNEL));

	set_bit(DCP_FLAG_PRODUCING, &dev->flags);

	if (use_walk) {
		ablkcipher_walk_init(&ctx->walk, ctx->req->dst,
				ctx->req->src, ctx->req->nbytes);
		ablkcipher_walk_phys(ctx->req, &ctx->walk);
		dcp_op_proceed(dev);
	} else {
		dcp_op_one(dev, dev->hw_pkg[0], 1);
		clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
	}
}
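
/*
 * Two callers share this setup: the async crypto path passes use_walk = 1
 * and lets the scatterlist walk fill the ring, while the bootstream ioctl
 * pre-fills hw_pkg[0] itself and passes use_walk = 0 for a single packet.
 */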
static void dcp_done_task(unsigned long data)
{
	struct dcp_dev *dev = (struct dcp_dev *)data;
	struct dcp_hw_packet *last_packet;
	int fin;
	fin = 0;

	for (last_packet = dev->hw_pkg[(dev->pkt_consumed) % DCP_MAX_PKG];
		last_packet->stat == 1;
		last_packet =
			dev->hw_pkg[++(dev->pkt_consumed) % DCP_MAX_PKG]) {

		dcp_dma_unmap(dev, last_packet);
		last_packet->stat = 0;
		fin++;
	}
	/* the last call of this function already consumed this IRQ's packet */
	if (fin == 0)
		return;

	dev_dbg(dev->dev,
		"Packet(s) done with status %x; finished: %d, produced:%d, complete consumed: %d",
		dev->ctx->stat, fin, dev->pkt_produced, dev->pkt_consumed);

	last_packet = dev->hw_pkg[(dev->pkt_consumed - 1) % DCP_MAX_PKG];
	if (!dev->ctx->stat && last_packet->pkt1 & DCP_PKT_CHAIN) {
		if (!test_and_set_bit(DCP_FLAG_PRODUCING, &dev->flags))
			dcp_op_proceed(dev);
		return;
	}

	while (unlikely(dev->pkt_consumed < dev->pkt_produced)) {
		dcp_dma_unmap(dev,
			dev->hw_pkg[dev->pkt_consumed++ % DCP_MAX_PKG]);
	}

	if (dev->ctx->flags & DCP_OTP_KEY) {
		/* we used the miscdevice, no walk to finish */
		clear_bit(DCP_FLAG_BUSY, &dev->flags);
		return;
	}

	ablkcipher_walk_complete(&dev->ctx->walk);
	dev->ctx->req->base.complete(&dev->ctx->req->base,
			dev->ctx->stat);
	dev->ctx->req = NULL;
	/* in case there are other requests in the queue */
	tasklet_schedule(&dev->queue_task);
}
static void dcp_watchdog(unsigned long data)
{
	struct dcp_dev *dev = (struct dcp_dev *)data;
	dev->ctx->stat |= dcp_read(dev,
			dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));

	dev_err(dev->dev, "Timeout, Channel status: %x", dev->ctx->stat);

	if (!dev->ctx->stat)
		dev->ctx->stat = -ETIMEDOUT;

	dcp_done_task(data);
}
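
/*
 * The watchdog fires only if the 500 ms timer armed in dcp_op_one() was
 * never cancelled by an interrupt; it folds the channel status into
 * ctx->stat and forces completion through dcp_done_task() so waiters do
 * not hang forever.
 */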
static irqreturn_t dcp_common_irq(int irq, void *context)
{
	u32 msk;
	struct dcp_dev *dev = (struct dcp_dev *) context;

	del_timer(&dev->watchdog);

	msk = DCP_STAT_IRQ(dcp_read(dev, DCP_REG_STAT));
	dcp_clear(dev, msk, DCP_REG_STAT);
	if (msk == 0)
		return IRQ_NONE;

	dev->ctx->stat |= dcp_read(dev,
			dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));

	if (msk & DCP_STAT_CHAN_1)
		tasklet_schedule(&dev->done_task);

	return IRQ_HANDLED;
}
static irqreturn_t dcp_vmi_irq(int irq, void *context)
{
	return dcp_common_irq(irq, context);
}
static irqreturn_t dcp_irq(int irq, void *context)
{
	return dcp_common_irq(irq, context);
}
static void dcp_crypt(struct dcp_dev *dev, struct dcp_op *ctx)
{
	dev->ctx = ctx;

	if ((ctx->flags & DCP_CBC) && ctx->req->info) {
		ctx->flags |= DCP_CBC_INIT;
		memcpy(dev->payload_base + AES_KEYSIZE_128,
			ctx->req->info, AES_KEYSIZE_128);
	}

	dcp_op_start(dev, 1);
}
static void dcp_queue_task(unsigned long data)
{
	struct dcp_dev *dev = (struct dcp_dev *) data;
	struct crypto_async_request *async_req, *backlog;
	struct crypto_ablkcipher *tfm;
	struct dcp_op *ctx;
	struct dcp_dev_req_ctx *rctx;
	struct ablkcipher_request *req;
	unsigned long flags;

	spin_lock_irqsave(&dev->queue_lock, flags);

	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	spin_unlock_irqrestore(&dev->queue_lock, flags);

	if (!async_req)
		goto ret_nothing_done;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);
	tfm = crypto_ablkcipher_reqtfm(req);
	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(tfm);

	if (!req->src || !req->dst)
		goto ret_nothing_done;

	ctx->flags |= rctx->mode;
	ctx->req = req;

	dcp_crypt(dev, ctx);

	return;

ret_nothing_done:
	clear_bit(DCP_FLAG_BUSY, &dev->flags);
}
static int dcp_cra_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct dcp_op *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_dev_req_ctx);

	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(ctx->fallback)) {
		dev_err(global_dev->dev, "Error allocating fallback algo %s\n",
			name);
		return PTR_ERR(ctx->fallback);
	}

	return 0;
}
static void dcp_cra_exit(struct crypto_tfm *tfm)
{
	struct dcp_op *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback)
		crypto_free_ablkcipher(ctx->fallback);

	ctx->fallback = NULL;
}
/* async interface */
static int dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int len)
{
	struct dcp_op *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned int ret = 0;
	ctx->keylen = len;
	ctx->flags = 0;
	if (len == AES_KEYSIZE_128) {
		if (memcmp(ctx->key, key, AES_KEYSIZE_128)) {
			memcpy(ctx->key, key, len);
			ctx->flags |= DCP_NEW_KEY;
		}
		return 0;
	}

	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	ctx->fallback->base.crt_flags |=
		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_ablkcipher_setkey(ctx->fallback, key, len);
	if (ret) {
		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm_aux->crt_flags |=
			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}
static int dcp_aes_cbc_crypt(struct ablkcipher_request *req, int mode)
{
	struct dcp_dev_req_ctx *rctx = ablkcipher_request_ctx(req);
	struct dcp_dev *dev = global_dev;
	unsigned long flags;
	int err = 0;

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
		return -EINVAL;

	rctx->mode = mode;

	spin_lock_irqsave(&dev->queue_lock, flags);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	spin_unlock_irqrestore(&dev->queue_lock, flags);

	flags = test_and_set_bit(DCP_FLAG_BUSY, &dev->flags);

	if (!(flags & DCP_FLAG_BUSY))
		tasklet_schedule(&dev->queue_task);

	return err;
}
static int dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct dcp_op *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		int err = 0;
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return dcp_aes_cbc_crypt(req, DCP_AES | DCP_ENC | DCP_CBC);
}
static int dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct dcp_op *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		int err = 0;
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return dcp_aes_cbc_crypt(req, DCP_AES | DCP_DEC | DCP_CBC);
}
static struct crypto_alg algs[] = {
	{
		.cra_name = "cbc(aes)",
		.cra_driver_name = "dcp-cbc-aes",
		.cra_alignmask = 3,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = AES_KEYSIZE_128,
		.cra_type = &crypto_ablkcipher_type,

		.cra_u.ablkcipher = {
			.min_keysize = AES_KEYSIZE_128,
			.max_keysize = AES_KEYSIZE_128,
			.setkey = dcp_aes_setkey,
			.encrypt = dcp_aes_cbc_encrypt,
			.decrypt = dcp_aes_cbc_decrypt,
			.ivsize = AES_KEYSIZE_128,
		}
	},
};
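
/*
 * Only 128-bit keys are handled by the DCP itself; dcp_aes_setkey() routes
 * all other key lengths to the software fallback allocated in
 * dcp_cra_init(), which is why CRYPTO_ALG_NEED_FALLBACK is set above.
 */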
/* DCP bootstream verification interface: uses OTP key for crypto */
static int dcp_bootstream_open(struct inode *inode, struct file *file)
{
	file->private_data = container_of((file->private_data),
			struct dcp_dev, dcp_bootstream_misc);
	return 0;
}
static long dcp_bootstream_ioctl(struct file *file,
					 unsigned int cmd, unsigned long arg)
{
	struct dcp_dev *dev = (struct dcp_dev *) file->private_data;
	void __user *argp = (void __user *)arg;
	int ret;

	if (dev == NULL)
		return -EBADF;

	if (cmd != DBS_ENC && cmd != DBS_DEC)
		return -EINVAL;

	if (copy_from_user(dev->payload_base, argp, 16))
		return -EFAULT;

	if (test_and_set_bit(DCP_FLAG_BUSY, &dev->flags))
		return -EAGAIN;

	dev->ctx = kzalloc(sizeof(struct dcp_op), GFP_KERNEL);
	if (!dev->ctx) {
		dev_err(dev->dev,
			"cannot allocate context for OTP crypto");
		clear_bit(DCP_FLAG_BUSY, &dev->flags);
		return -ENOMEM;
	}

	dev->ctx->flags = DCP_AES | DCP_ECB | DCP_OTP_KEY | DCP_CBC_INIT;
	dev->ctx->flags |= (cmd == DBS_ENC) ? DCP_ENC : DCP_DEC;
	dev->hw_pkg[0]->src = dev->payload_base_dma;
	dev->hw_pkg[0]->dst = dev->payload_base_dma;
	dev->hw_pkg[0]->size = 16;

	dcp_op_start(dev, 0);

	while (test_bit(DCP_FLAG_BUSY, &dev->flags))
		cpu_relax();

	ret = dev->ctx->stat;
	if (!ret && copy_to_user(argp, dev->payload_base, 16))
		ret = -EFAULT;

	kfree(dev->ctx);

	return ret;
}
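
/*
 * This path deliberately busy-waits on DCP_FLAG_BUSY: it transforms exactly
 * one 16-byte block in place in the payload buffer using the OTP key (which
 * never becomes visible to software), and the flag is cleared from
 * dcp_done_task() or, on timeout, from the watchdog.
 */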
static const struct file_operations dcp_bootstream_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = dcp_bootstream_ioctl,
	.open = dcp_bootstream_open,
};
static int dcp_probe(struct platform_device *pdev)
{
	struct dcp_dev *dev = NULL;
	struct resource *r;
	int i, ret, j;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	global_dev = dev;
	dev->dev = &pdev->dev;

	platform_set_drvdata(pdev, dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(dev->dcp_regs_base))
		return PTR_ERR(dev->dcp_regs_base);

	dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
	udelay(10);
	dcp_clear(dev, DCP_CTRL_SFRST | DCP_CTRL_CLKGATE, DCP_REG_CTRL);

	dcp_write(dev, DCP_CTRL_GATHER_RES_WRITE |
		DCP_CTRL_ENABLE_CONTEXT_CACHE | DCP_CTRL_CH_IRQ_E_1,
		DCP_REG_CTRL);

	dcp_write(dev, DCP_CHAN_CTRL_ENABLE_1, DCP_REG_CHAN_CTRL);

	for (i = 0; i < 4; i++)
		dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, i));

	dcp_clear(dev, -1, DCP_REG_STAT);

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!r) {
		dev_err(&pdev->dev, "can't get IRQ resource (0)\n");
		return -EIO;
	}
	dev->dcp_vmi_irq = r->start;
	ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0,
			       "dcp", dev);
	if (ret != 0) {
		dev_err(&pdev->dev, "can't request_irq (0)\n");
		return -EIO;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	if (!r) {
		dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
		return -EIO;
	}
	dev->dcp_irq = r->start;
	ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp",
			       dev);
	if (ret != 0) {
		dev_err(&pdev->dev, "can't request_irq (1)\n");
		return -EIO;
	}

	dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
			DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
			&dev->hw_phys_pkg,
			GFP_KERNEL);
	if (!dev->hw_pkg[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}

	for (i = 1; i < DCP_MAX_PKG; i++) {
		dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg
				+ i * sizeof(struct dcp_hw_packet);
		dev->hw_pkg[i] = dev->hw_pkg[i - 1] + 1;
	}
	dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg;

	dev->payload_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
			&dev->payload_base_dma, GFP_KERNEL);
	if (!dev->payload_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		ret = -ENOMEM;
		goto err_free_hw_packet;
	}
	tasklet_init(&dev->queue_task, dcp_queue_task,
		(unsigned long) dev);
	tasklet_init(&dev->done_task, dcp_done_task,
		(unsigned long) dev);
	spin_lock_init(&dev->queue_lock);

	crypto_init_queue(&dev->queue, 10);

	init_timer(&dev->watchdog);
	dev->watchdog.function = &dcp_watchdog;
	dev->watchdog.data = (unsigned long)dev;

	dev->dcp_bootstream_misc.minor = MISC_DYNAMIC_MINOR,
	dev->dcp_bootstream_misc.name = "dcpboot",
	dev->dcp_bootstream_misc.fops = &dcp_bootstream_fops,
	ret = misc_register(&dev->dcp_bootstream_misc);
	if (ret != 0) {
		dev_err(dev->dev, "Unable to register misc device\n");
		goto err_free_key_iv;
	}

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		algs[i].cra_priority = 300;
		algs[i].cra_ctxsize = sizeof(struct dcp_op);
		algs[i].cra_module = THIS_MODULE;
		algs[i].cra_init = dcp_cra_init;
		algs[i].cra_exit = dcp_cra_exit;
		if (crypto_register_alg(&algs[i])) {
			dev_err(&pdev->dev, "register algorithm failed\n");
			ret = -ENOMEM;
			goto err_unregister;
		}
	}
	dev_notice(&pdev->dev, "DCP crypto enabled.\n");

	return 0;

err_unregister:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);
err_free_key_iv:
	tasklet_kill(&dev->done_task);
	tasklet_kill(&dev->queue_task);
	dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
			dev->payload_base_dma);
err_free_hw_packet:
	dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
		sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
		dev->hw_phys_pkg);

	return ret;
}
static int dcp_remove(struct platform_device *pdev)
{
	struct dcp_dev *dev;
	int j;
	dev = platform_get_drvdata(pdev);

	misc_deregister(&dev->dcp_bootstream_misc);

	for (j = 0; j < ARRAY_SIZE(algs); j++)
		crypto_unregister_alg(&algs[j]);

	tasklet_kill(&dev->done_task);
	tasklet_kill(&dev->queue_task);

	dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
			dev->payload_base_dma);

	dma_free_coherent(&pdev->dev,
			DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
			dev->hw_pkg[0], dev->hw_phys_pkg);

	return 0;
}
static struct of_device_id fs_dcp_of_match[] = {
	{ .compatible = "fsl-dcp" },
	{},
};
static struct platform_driver fs_dcp_driver = {
	.probe = dcp_probe,
	.remove = dcp_remove,
	.driver = {
		.name = "fsl-dcp",
		.owner = THIS_MODULE,
		.of_match_table = fs_dcp_of_match
	}
};

module_platform_driver(fs_dcp_driver);
MODULE_AUTHOR("Tobias Rauter <tobias.rauter@gmail.com>");
MODULE_DESCRIPTION("Freescale DCP Crypto Driver");
MODULE_LICENSE("GPL");