/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "mv_cesa.h"
#define MV_CESA	"MV-CESA:"
#define MAX_HW_HASH_SIZE	0xFFFF
#define MV_CESA_EXPIRE		500 /* msec */

#define MV_CESA_DEFAULT_SRAM_SIZE	2048
/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /.\               |
 *                          |                | more scatter entries
 *                          \________________/
 */

enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};
/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:		sg iterator for src
 * @dst_sg_it:		sg iterator for dst
 * @sg_src_left:	bytes left in src to process (scatter list)
 * @src_start:		offset to add to src start position (scatter list)
 * @crypt_len:		length of current hw crypt/hash process
 * @hw_nbytes:		total bytes to process in hw for this request
 * @copy_back:		whether to copy data back (crypt) or not (hash)
 * @sg_dst_left:	bytes left in dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
 * @hw_processed_bytes:	number of bytes processed by hw (request)
 *
 * The sg helpers are used to iterate over the scatterlist. Since the size
 * of the SRAM may be less than the scatter size, this struct is used to
 * keep track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;
	void (*complete)(void);
	void (*process)(int is_first);

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	int hw_nbytes;
	/* dst mostly */
	int copy_back;
	int sg_dst_left;
	int dst_start;
	int hw_processed_bytes;
};
struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	u32 sram_size;
	struct gen_pool *sram_pool;
	dma_addr_t sram_dma;

	int irq;
	struct clk *clk;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct timer_list completion_timer;
	struct crypto_async_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int has_sha1;
	int has_hmac_sha1;
};
static struct crypto_priv *cpg;
struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

enum hash_op {
	COP_SHA1,
	COP_HMAC_SHA1,
};
struct mv_tfm_hash_ctx {
	struct crypto_shash *fallback;
	struct crypto_shash *base_hash;
	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
	int count_add;
	enum hash_op op;
};
struct mv_req_hash_ctx {
	u64 count;
	u32 state[SHA1_DIGEST_SIZE / 4];
	u8 buffer[SHA1_BLOCK_SIZE];
	int first_hash;		/* marks that we don't have previous state */
	int last_chunk;		/* marks that this is the 'final' request */
	int extra_bytes;	/* unprocessed bytes in buffer */
	enum hash_op op;
	int count_add;
};
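
/*
 * Watchdog for a wedged engine: if the completion interrupt never
 * arrives, force the accelerator off and let the queue thread clean up.
 */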
static void mv_completion_timer_callback(unsigned long unused)
{
	int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;

	printk(KERN_ERR MV_CESA
	       "completion timer expired (CESA %sactive), cleaning up.\n",
	       active ? "" : "in");

	del_timer(&cpg->completion_timer);
	writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
	while (readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
		printk(KERN_INFO MV_CESA "%s: waiting for engine finishing\n",
		       __func__);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
}
static void mv_setup_timer(void)
{
	setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0);
	mod_timer(&cpg->completion_timer,
		  jiffies + msecs_to_jiffies(MV_CESA_EXPIRE));
}
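
/*
 * Derive the AES decryption key from the stored encryption key: expand
 * the encryption key in software and copy out the last round keys, which
 * is the form the hardware expects for decryption.
 */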
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
		       4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}
static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
	return 0;
}
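
/*
 * Copy len bytes from the source scatterlist into dbuf, advancing the
 * sg mapping iterator as entries are exhausted.
 */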
static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
	int ret;
	void *sbuf;
	int copy_len;

	while (len) {
		if (!p->sg_src_left) {
			/* next sg please */
			ret = sg_miter_next(&p->src_sg_it);
			BUG_ON(!ret);
			p->sg_src_left = p->src_sg_it.length;
			p->src_start = 0;
		}

		sbuf = p->src_sg_it.addr + p->src_start;

		copy_len = min(p->sg_src_left, len);
		memcpy(dbuf, sbuf, copy_len);

		p->src_start += copy_len;
		p->sg_src_left -= copy_len;

		len -= copy_len;
		dbuf += copy_len;
	}
}
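
/*
 * Stage as much of the remaining request as fits into the SRAM input
 * window (bounded by max_req_size), appending after any bytes already
 * placed at SRAM_DATA_IN_START.
 */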
static void setup_data_in(void)
{
	struct req_progress *p = &cpg->p;
	int data_in_sram =
	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);

	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
			data_in_sram - p->crypt_len);
	p->crypt_len = data_in_sram;
}
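
/*
 * Program one AES operation: build the sec_accel_config descriptor, copy
 * the key (and, for CBC, the IV on the first block) plus input data into
 * SRAM, then start the accelerator.
 */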
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
		       AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
		       AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
	       sizeof(struct sec_accel_config));

	/* GO */
	mv_setup_timer();
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
}
static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	sg_miter_stop(&cpg->p.src_sg_it);
	sg_miter_stop(&cpg->p.dst_sg_it);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
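
/*
 * Program one SHA1/HMAC-SHA1 fragment: pick the fragment mode
 * (not/first/mid/last fragmented), reload the saved digest state when
 * continuing a hash, then start the accelerator.
 */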
static void mv_process_hash_current(int first_block)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct req_progress *p = &cpg->p;
	struct sec_accel_config op = { 0 };
	int is_last;

	switch (req_ctx->op) {
	case COP_SHA1:
	default:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
		break;
	case COP_HMAC_SHA1:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
		memcpy(cpg->sram + SRAM_HMAC_IV_IN,
		       tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
		break;
	}

	op.mac_src_p =
		MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32)
		req_ctx->count);

	setup_data_in();

	op.mac_digest =
		MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
	op.mac_iv =
		MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);

	is_last = req_ctx->last_chunk
		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
		&& (req_ctx->count <= MAX_HW_HASH_SIZE);
	if (req_ctx->first_hash) {
		if (is_last)
			op.config |= CFG_NOT_FRAG;
		else
			op.config |= CFG_FIRST_FRAG;

		req_ctx->first_hash = 0;
	} else {
		if (is_last)
			op.config |= CFG_LAST_FRAG;
		else
			op.config |= CFG_MID_FRAG;

		if (first_block) {
			writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
			writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
			writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
			writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
			writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
		}
	}

	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));

	/* GO */
	mv_setup_timer();
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
}
static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
					  struct shash_desc *desc)
{
	int i;
	struct sha1_state shash_state;

	shash_state.count = ctx->count + ctx->count_add;
	for (i = 0; i < 5; i++)
		shash_state.state[i] = ctx->state[i];
	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
	return crypto_shash_import(desc, &shash_state);
}
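
/*
 * Finish the digest in software, either from the buffered bytes alone or
 * by importing the hardware's partial SHA1 state first.
 */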
static int mv_hash_final_fallback(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	SHASH_DESC_ON_STACK(shash, tfm_ctx->fallback);
	int rc;

	shash->tfm = tfm_ctx->fallback;
	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	if (unlikely(req_ctx->first_hash)) {
		crypto_shash_init(shash);
		crypto_shash_update(shash, req_ctx->buffer,
				    req_ctx->extra_bytes);
	} else {
		/* only SHA1 for now.... */
		rc = mv_hash_import_sha1_ctx(req_ctx, shash);
		if (rc)
			goto out;
	}
	rc = crypto_shash_final(shash, req->result);
out:
	return rc;
}
static void mv_save_digest_state(struct mv_req_hash_ctx *ctx)
{
	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
}
static void mv_hash_algo_completion(void)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	if (ctx->extra_bytes)
		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
	sg_miter_stop(&cpg->p.src_sg_it);

	if (likely(ctx->last_chunk)) {
		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
			       crypto_ahash_digestsize(crypto_ahash_reqtfm
						       (req)));
		} else {
			mv_save_digest_state(ctx);
			mv_hash_final_fallback(req);
		}
	} else {
		mv_save_digest_state(ctx);
	}
}
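
/*
 * Runs in the queue thread after the engine signalled completion: copy
 * the SRAM output window back to the destination scatterlist (crypt
 * only), then either schedule the next chunk or complete the request.
 */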
static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;

	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
	if (cpg->p.copy_back) {
		int need_copy_len = cpg->p.crypt_len;
		int sram_offset = 0;

		do {
			int dst_copy;

			if (!cpg->p.sg_dst_left) {
				ret = sg_miter_next(&cpg->p.dst_sg_it);
				BUG_ON(!ret);
				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
				cpg->p.dst_start = 0;
			}

			buf = cpg->p.dst_sg_it.addr;
			buf += cpg->p.dst_start;

			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

			memcpy(buf,
			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
			       dst_copy);
			sram_offset += dst_copy;
			cpg->p.sg_dst_left -= dst_copy;
			need_copy_len -= dst_copy;
			cpg->p.dst_start += dst_copy;
		} while (need_copy_len > 0);
	}

	cpg->p.crypt_len = 0;

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		cpg->p.process(0);
	} else {
		cpg->p.complete();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}
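
/* Count how many scatterlist entries are needed to cover total_bytes. */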
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;
	size_t cur_len;

	while (sl) {
		cur_len = sl[i].length;
		++i;
		if (total_bytes > cur_len)
			total_bytes -= cur_len;
		else
			break;
	}

	return i;
}
static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
	struct req_progress *p = &cpg->p;
	int num_sgs;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	p->hw_nbytes = req->nbytes;
	p->complete = mv_crypto_algo_completion;
	p->process = mv_process_current_q;
	p->copy_back = 1;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

	mv_process_current_q(1);
}
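
/*
 * Start a hash request: bytes that do not fill a whole SHA1 block are
 * held back in ctx->buffer; if nothing is left for the hardware to do,
 * the request is finished via the software fallback.
 */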
static void mv_start_new_hash_req(struct ahash_request *req)
{
	struct req_progress *p = &cpg->p;
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
	int num_sgs, hw_bytes, old_extra_bytes, rc;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	hw_bytes = req->nbytes + ctx->extra_bytes;
	old_extra_bytes = ctx->extra_bytes;

	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
	if (ctx->extra_bytes != 0
	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
		hw_bytes -= ctx->extra_bytes;
	else
		ctx->extra_bytes = 0;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	if (hw_bytes) {
		p->hw_nbytes = hw_bytes;
		p->complete = mv_hash_algo_completion;
		p->process = mv_process_hash_current;

		if (unlikely(old_extra_bytes)) {
			memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
			       old_extra_bytes);
			p->crypt_len = old_extra_bytes;
		}

		mv_process_hash_current(1);
	} else {
		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
				ctx->extra_bytes - old_extra_bytes);
		sg_miter_stop(&p->src_sg_it);
		if (ctx->last_chunk)
			rc = mv_hash_final_fallback(req);
		else
			rc = 0;
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->base.complete(&req->base, rc);
		local_bh_enable();
	}
}
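
/*
 * Queue manager kthread: dequeues finished work and feeds the engine the
 * next queued request whenever it goes idle.
 */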
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) !=
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ablkcipher_request *req =
				    ablkcipher_request_cast(async_req);
				mv_start_new_crypt_req(req);
			} else {
				struct ahash_request *req =
				    ahash_request_cast(async_req);
				mv_start_new_hash_req(req);
			}
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}
static int mv_handle_req(struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}
static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}
static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}
static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
				 int is_last, unsigned int req_len,
				 int count_add)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->op = op;
	ctx->count = req_len;
	ctx->first_hash = 1;
	ctx->last_chunk = is_last;
	ctx->count_add = count_add;
}
static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
				   unsigned int req_len)
{
	ctx->last_chunk = is_last;
	ctx->count += req_len;
}
static int mv_hash_init(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);

	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
			     tfm_ctx->count_add);
	return 0;
}
static int mv_hash_update(struct ahash_request *req)
{
	if (!req->nbytes)
		return 0;

	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
	return mv_handle_req(&req->base);
}
static int mv_hash_final(struct ahash_request *req)
{
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	ahash_request_set_crypt(req, NULL, req->result, 0);
	mv_update_hash_req_ctx(ctx, 1, 0);
	return mv_handle_req(&req->base);
}
static int mv_hash_finup(struct ahash_request *req)
{
	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
	return mv_handle_req(&req->base);
}
static int mv_hash_digest(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);

	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
			     req->nbytes, tfm_ctx->count_add);
	return mv_handle_req(&req->base);
}
static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
			     const void *ostate)
{
	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
	int i;

	for (i = 0; i < 5; i++) {
		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
	}
}
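
/*
 * HMAC setkey: precompute the ipad/opad intermediate hash states with
 * the base hash and store them as IVs for the hardware.
 */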
static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	int rc;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	int bs, ds, ss;

	if (!ctx->base_hash)
		return 0;

	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
	if (rc)
		return rc;

	/*
	 * Can't see a way to extract the ipad/opad from the fallback tfm,
	 * so I'm basically copying code from the hmac module.
	 */
	bs = crypto_shash_blocksize(ctx->base_hash);
	ds = crypto_shash_digestsize(ctx->base_hash);
	ss = crypto_shash_statesize(ctx->base_hash);

	{
		SHASH_DESC_ON_STACK(shash, ctx->base_hash);

		unsigned int i;
		char ipad[ss];
		char opad[ss];

		shash->tfm = ctx->base_hash;
		shash->flags = crypto_shash_get_flags(ctx->base_hash) &
		    CRYPTO_TFM_REQ_MAY_SLEEP;

		if (keylen > bs) {
			int err;

			err = crypto_shash_digest(shash, key, keylen, ipad);
			if (err)
				return err;

			keylen = ds;
		} else
			memcpy(ipad, key, keylen);

		memset(ipad + keylen, 0, bs - keylen);
		memcpy(opad, ipad, bs);

		for (i = 0; i < bs; i++) {
			ipad[i] ^= HMAC_IPAD_VALUE;
			opad[i] ^= HMAC_OPAD_VALUE;
		}

		rc = crypto_shash_init(shash) ? :
		     crypto_shash_update(shash, ipad, bs) ? :
		     crypto_shash_export(shash, ipad) ? :
		     crypto_shash_init(shash) ? :
		     crypto_shash_update(shash, opad, bs) ? :
		     crypto_shash_export(shash, opad);

		if (rc == 0)
			mv_hash_init_ivs(ctx, ipad, opad);

		return rc;
	}
}
static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
			    enum hash_op op, int count_add)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm = NULL;
	struct crypto_shash *base_hash = NULL;
	int err = -ENOMEM;

	ctx->op = op;
	ctx->count_add = count_add;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING MV_CESA
		       "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}
	ctx->fallback = fallback_tfm;

	if (base_hash_name) {
		/* Allocate a hash to compute the ipad/opad of hmac. */
		base_hash = crypto_alloc_shash(base_hash_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(base_hash)) {
			printk(KERN_WARNING MV_CESA
			       "Base driver '%s' could not be loaded!\n",
			       base_hash_name);
			err = PTR_ERR(base_hash);
			goto err_bad_base;
		}
	}
	ctx->base_hash = base_hash;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_req_hash_ctx) +
				 crypto_shash_descsize(ctx->fallback));
	return 0;
err_bad_base:
	crypto_free_shash(fallback_tfm);
out:
	return err;
}
static void mv_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
	if (ctx->base_hash)
		crypto_free_shash(ctx->base_hash);
}
static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
}

static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}
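
/*
 * Completion interrupt: acknowledge the accelerator-done interrupt,
 * cancel the watchdog timer and wake the queue thread for dequeue.
 */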
static irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	if (!del_timer(&cpg->completion_timer)) {
		printk(KERN_WARNING MV_CESA
		       "got an interrupt but no pending timer?\n");
	}
	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}
static struct crypto_alg mv_aes_alg_ecb = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "mv-ecb-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
			  CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_ecb,
			.decrypt	= mv_dec_aes_ecb,
		},
	},
};
= {
956 .cra_name
= "cbc(aes)",
957 .cra_driver_name
= "mv-cbc-aes",
959 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
960 CRYPTO_ALG_KERN_DRIVER_ONLY
| CRYPTO_ALG_ASYNC
,
961 .cra_blocksize
= AES_BLOCK_SIZE
,
962 .cra_ctxsize
= sizeof(struct mv_ctx
),
964 .cra_type
= &crypto_ablkcipher_type
,
965 .cra_module
= THIS_MODULE
,
966 .cra_init
= mv_cra_init
,
969 .ivsize
= AES_BLOCK_SIZE
,
970 .min_keysize
= AES_MIN_KEY_SIZE
,
971 .max_keysize
= AES_MAX_KEY_SIZE
,
972 .setkey
= mv_setkey_aes
,
973 .encrypt
= mv_enc_aes_cbc
,
974 .decrypt
= mv_dec_aes_cbc
,
979 static struct ahash_alg mv_sha1_alg
= {
980 .init
= mv_hash_init
,
981 .update
= mv_hash_update
,
982 .final
= mv_hash_final
,
983 .finup
= mv_hash_finup
,
984 .digest
= mv_hash_digest
,
986 .digestsize
= SHA1_DIGEST_SIZE
,
989 .cra_driver_name
= "mv-sha1",
992 CRYPTO_ALG_ASYNC
| CRYPTO_ALG_KERN_DRIVER_ONLY
|
993 CRYPTO_ALG_NEED_FALLBACK
,
994 .cra_blocksize
= SHA1_BLOCK_SIZE
,
995 .cra_ctxsize
= sizeof(struct mv_tfm_hash_ctx
),
996 .cra_init
= mv_cra_hash_sha1_init
,
997 .cra_exit
= mv_cra_hash_exit
,
998 .cra_module
= THIS_MODULE
,
1003 static struct ahash_alg mv_hmac_sha1_alg
= {
1004 .init
= mv_hash_init
,
1005 .update
= mv_hash_update
,
1006 .final
= mv_hash_final
,
1007 .finup
= mv_hash_finup
,
1008 .digest
= mv_hash_digest
,
1009 .setkey
= mv_hash_setkey
,
1011 .digestsize
= SHA1_DIGEST_SIZE
,
1013 .cra_name
= "hmac(sha1)",
1014 .cra_driver_name
= "mv-hmac-sha1",
1015 .cra_priority
= 300,
1017 CRYPTO_ALG_ASYNC
| CRYPTO_ALG_KERN_DRIVER_ONLY
|
1018 CRYPTO_ALG_NEED_FALLBACK
,
1019 .cra_blocksize
= SHA1_BLOCK_SIZE
,
1020 .cra_ctxsize
= sizeof(struct mv_tfm_hash_ctx
),
1021 .cra_init
= mv_cra_hash_hmac_sha1_init
,
1022 .cra_exit
= mv_cra_hash_exit
,
1023 .cra_module
= THIS_MODULE
,
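
/*
 * Map the crypto SRAM: prefer an SRAM pool described in the device tree,
 * otherwise fall back to the "sram" platform MEM resource.
 */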
static int mv_cesa_get_sram(struct platform_device *pdev,
			    struct crypto_priv *cp)
{
	struct resource *res;
	u32 sram_size = MV_CESA_DEFAULT_SRAM_SIZE;

	of_property_read_u32(pdev->dev.of_node, "marvell,crypto-sram-size",
			     &sram_size);

	cp->sram_size = sram_size;
	cp->sram_pool = of_gen_pool_get(pdev->dev.of_node,
					"marvell,crypto-srams", 0);
	if (cp->sram_pool) {
		cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size,
					      &cp->sram_dma);
		if (cp->sram)
			return 0;
		return -ENOMEM;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res || resource_size(res) < cp->sram_size)
		return -EINVAL;

	cp->sram = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(cp->sram))
		return PTR_ERR(cp->sram);

	return 0;
}
static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(cp->reg)) {
		ret = PTR_ERR(cp->reg);
		goto err;
	}

	ret = mv_cesa_get_sram(pdev, cp);
	if (ret)
		goto err;

	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err;
	}

	ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev), cp);
	if (ret)
		goto err_thread;

	/*
	 * Not all platforms can gate the clock, so it is not an error if
	 * the clock does not exist.
	 */
	cp->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(cp->clk))
		clk_prepare_enable(cp->clk);

	writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-ecb driver\n");
		goto err_irq;
	}

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-cbc driver\n");
		goto err_unreg_ecb;
	}

	ret = crypto_register_ahash(&mv_sha1_alg);
	if (ret == 0)
		cpg->has_sha1 = 1;
	else
		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");

	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
	if (ret == 0) {
		cpg->has_hmac_sha1 = 1;
	} else {
		printk(KERN_WARNING MV_CESA
		       "Could not register hmac-sha1 driver\n");
	}

	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
	if (!IS_ERR(cp->clk)) {
		clk_disable_unprepare(cp->clk);
		clk_put(cp->clk);
	}
err_thread:
	kthread_stop(cp->queue_th);
err:
	cpg = NULL;
	return ret;
}
static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	if (cp->has_sha1)
		crypto_unregister_ahash(&mv_sha1_alg);
	if (cp->has_hmac_sha1)
		crypto_unregister_ahash(&mv_hmac_sha1_alg);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);

	if (!IS_ERR(cp->clk)) {
		clk_disable_unprepare(cp->clk);
		clk_put(cp->clk);
	}

	cpg = NULL;
	return 0;
}
static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", },
	{ .compatible = "marvell,kirkwood-crypto", },
	{ .compatible = "marvell,dove-crypto", },
	{}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
static struct platform_driver marvell_crypto = {
	.probe		= mv_probe,
	.remove		= mv_remove,
	.driver		= {
		.name	= "mv_crypto",
		.of_match_table = mv_cesa_of_match_table,
	},
};
MODULE_ALIAS("platform:mv_crypto");

module_platform_driver(marvell_crypto);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");