2 * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
4 * Copyright (C) 2014-2017 Axis Communications AB
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #include <linux/bitfield.h>
9 #include <linux/crypto.h>
10 #include <linux/debugfs.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/fault-inject.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
20 #include <linux/platform_device.h>
21 #include <linux/scatterlist.h>
22 #include <linux/slab.h>
24 #include <crypto/aes.h>
25 #include <crypto/internal/aead.h>
26 #include <crypto/internal/hash.h>
27 #include <crypto/internal/skcipher.h>
28 #include <crypto/scatterwalk.h>
29 #include <crypto/sha.h>
30 #include <crypto/xts.h>
32 /* Max length of a line in all cache levels for Artpec SoCs. */
33 #define ARTPEC_CACHE_LINE_MAX 32
35 #define PDMA_OUT_CFG 0x0000
36 #define PDMA_OUT_BUF_CFG 0x0004
37 #define PDMA_OUT_CMD 0x0008
38 #define PDMA_OUT_DESCRQ_PUSH 0x0010
39 #define PDMA_OUT_DESCRQ_STAT 0x0014
41 #define A6_PDMA_IN_CFG 0x0028
42 #define A6_PDMA_IN_BUF_CFG 0x002c
43 #define A6_PDMA_IN_CMD 0x0030
44 #define A6_PDMA_IN_STATQ_PUSH 0x0038
45 #define A6_PDMA_IN_DESCRQ_PUSH 0x0044
46 #define A6_PDMA_IN_DESCRQ_STAT 0x0048
47 #define A6_PDMA_INTR_MASK 0x0068
48 #define A6_PDMA_ACK_INTR 0x006c
49 #define A6_PDMA_MASKED_INTR 0x0074
51 #define A7_PDMA_IN_CFG 0x002c
52 #define A7_PDMA_IN_BUF_CFG 0x0030
53 #define A7_PDMA_IN_CMD 0x0034
54 #define A7_PDMA_IN_STATQ_PUSH 0x003c
55 #define A7_PDMA_IN_DESCRQ_PUSH 0x0048
56 #define A7_PDMA_IN_DESCRQ_STAT 0x004C
57 #define A7_PDMA_INTR_MASK 0x006c
58 #define A7_PDMA_ACK_INTR 0x0070
59 #define A7_PDMA_MASKED_INTR 0x0078
61 #define PDMA_OUT_CFG_EN BIT(0)
63 #define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
64 #define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
66 #define PDMA_OUT_CMD_START BIT(0)
67 #define A6_PDMA_OUT_CMD_STOP BIT(3)
68 #define A7_PDMA_OUT_CMD_STOP BIT(2)
70 #define PDMA_OUT_DESCRQ_PUSH_LEN GENMASK(5, 0)
71 #define PDMA_OUT_DESCRQ_PUSH_ADDR GENMASK(31, 6)
73 #define PDMA_OUT_DESCRQ_STAT_LEVEL GENMASK(3, 0)
74 #define PDMA_OUT_DESCRQ_STAT_SIZE GENMASK(7, 4)
76 #define PDMA_IN_CFG_EN BIT(0)
78 #define PDMA_IN_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
79 #define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
80 #define PDMA_IN_BUF_CFG_STAT_BUF_SIZE GENMASK(14, 10)
82 #define PDMA_IN_CMD_START BIT(0)
83 #define A6_PDMA_IN_CMD_FLUSH_STAT BIT(2)
84 #define A6_PDMA_IN_CMD_STOP BIT(3)
85 #define A7_PDMA_IN_CMD_FLUSH_STAT BIT(1)
86 #define A7_PDMA_IN_CMD_STOP BIT(2)
88 #define PDMA_IN_STATQ_PUSH_LEN GENMASK(5, 0)
89 #define PDMA_IN_STATQ_PUSH_ADDR GENMASK(31, 6)
91 #define PDMA_IN_DESCRQ_PUSH_LEN GENMASK(5, 0)
92 #define PDMA_IN_DESCRQ_PUSH_ADDR GENMASK(31, 6)
94 #define PDMA_IN_DESCRQ_STAT_LEVEL GENMASK(3, 0)
95 #define PDMA_IN_DESCRQ_STAT_SIZE GENMASK(7, 4)
97 #define A6_PDMA_INTR_MASK_IN_DATA BIT(2)
98 #define A6_PDMA_INTR_MASK_IN_EOP BIT(3)
99 #define A6_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(4)
101 #define A7_PDMA_INTR_MASK_IN_DATA BIT(3)
102 #define A7_PDMA_INTR_MASK_IN_EOP BIT(4)
103 #define A7_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(5)
105 #define A6_CRY_MD_OPER GENMASK(19, 16)
107 #define A6_CRY_MD_HASH_SEL_CTX GENMASK(21, 20)
108 #define A6_CRY_MD_HASH_HMAC_FIN BIT(23)
110 #define A6_CRY_MD_CIPHER_LEN GENMASK(21, 20)
111 #define A6_CRY_MD_CIPHER_DECR BIT(22)
112 #define A6_CRY_MD_CIPHER_TWEAK BIT(23)
113 #define A6_CRY_MD_CIPHER_DSEQ BIT(24)
115 #define A7_CRY_MD_OPER GENMASK(11, 8)
117 #define A7_CRY_MD_HASH_SEL_CTX GENMASK(13, 12)
118 #define A7_CRY_MD_HASH_HMAC_FIN BIT(15)
120 #define A7_CRY_MD_CIPHER_LEN GENMASK(13, 12)
121 #define A7_CRY_MD_CIPHER_DECR BIT(14)
122 #define A7_CRY_MD_CIPHER_TWEAK BIT(15)
123 #define A7_CRY_MD_CIPHER_DSEQ BIT(16)
125 /* DMA metadata constants */
126 #define regk_crypto_aes_cbc 0x00000002
127 #define regk_crypto_aes_ctr 0x00000003
128 #define regk_crypto_aes_ecb 0x00000001
129 #define regk_crypto_aes_gcm 0x00000004
130 #define regk_crypto_aes_xts 0x00000005
131 #define regk_crypto_cache 0x00000002
132 #define a6_regk_crypto_dlkey 0x0000000a
133 #define a7_regk_crypto_dlkey 0x0000000e
134 #define regk_crypto_ext 0x00000001
135 #define regk_crypto_hmac_sha1 0x00000007
136 #define regk_crypto_hmac_sha256 0x00000009
137 #define regk_crypto_hmac_sha384 0x0000000b
138 #define regk_crypto_hmac_sha512 0x0000000d
139 #define regk_crypto_init 0x00000000
140 #define regk_crypto_key_128 0x00000000
141 #define regk_crypto_key_192 0x00000001
142 #define regk_crypto_key_256 0x00000002
143 #define regk_crypto_null 0x00000000
144 #define regk_crypto_sha1 0x00000006
145 #define regk_crypto_sha256 0x00000008
146 #define regk_crypto_sha384 0x0000000a
147 #define regk_crypto_sha512 0x0000000c
149 /* DMA descriptor structures */
150 struct pdma_descr_ctrl
{
151 unsigned char short_descr
: 1;
152 unsigned char pad1
: 1;
153 unsigned char eop
: 1;
154 unsigned char intr
: 1;
155 unsigned char short_len
: 3;
156 unsigned char pad2
: 1;
159 struct pdma_data_descr
{
160 unsigned int len
: 24;
161 unsigned int buf
: 32;
164 struct pdma_short_descr
{
165 unsigned char data
[7];
169 struct pdma_descr_ctrl ctrl
;
171 struct pdma_data_descr data
;
172 struct pdma_short_descr shrt
;
176 struct pdma_stat_descr
{
177 unsigned char pad1
: 1;
178 unsigned char pad2
: 1;
179 unsigned char eop
: 1;
180 unsigned char pad3
: 5;
181 unsigned int len
: 24;
184 /* Each descriptor array can hold max 64 entries */
185 #define PDMA_DESCR_COUNT 64
187 #define MODULE_NAME "Artpec-6 CA"
189 /* Hash modes (including HMAC variants) */
190 #define ARTPEC6_CRYPTO_HASH_SHA1 1
191 #define ARTPEC6_CRYPTO_HASH_SHA256 2
192 #define ARTPEC6_CRYPTO_HASH_SHA384 3
193 #define ARTPEC6_CRYPTO_HASH_SHA512 4
196 #define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1
197 #define ARTPEC6_CRYPTO_CIPHER_AES_CBC 2
198 #define ARTPEC6_CRYPTO_CIPHER_AES_CTR 3
199 #define ARTPEC6_CRYPTO_CIPHER_AES_XTS 5
201 /* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
202 * It operates on a descriptor array with up to 64 descriptor entries.
203 * The arrays must be 64 byte aligned in memory.
205 * The ciphering unit has no registers and is completely controlled by
206 * a 4-byte metadata that is inserted at the beginning of each dma packet.
208 * A dma packet is a sequence of descriptors terminated by setting the .eop
209 * field in the final descriptor of the packet.
211 * Multiple packets are used for providing context data, key data and
212 * the plain/ciphertext.
214 * PDMA Descriptors (Array)
215 * +------+------+------+~~+-------+------+----
216 * | 0 | 1 | 2 |~~| 11 EOP| 12 | ....
217 * +--+---+--+---+----+-+~~+-------+----+-+----
220 * __|__ +-------++-------++-------+ +----+
221 * | MD | |Payload||Payload||Payload| | MD |
222 * +-----+ +-------++-------++-------+ +----+
225 struct artpec6_crypto_bounce_buffer
{
226 struct list_head list
;
228 struct scatterlist
*sg
;
230 /* buf is aligned to ARTPEC_CACHE_LINE_MAX and
231 * holds up to ARTPEC_CACHE_LINE_MAX bytes data.
236 struct artpec6_crypto_dma_map
{
239 enum dma_data_direction dir
;
242 struct artpec6_crypto_dma_descriptors
{
243 struct pdma_descr out
[PDMA_DESCR_COUNT
] __aligned(64);
244 struct pdma_descr in
[PDMA_DESCR_COUNT
] __aligned(64);
245 u32 stat
[PDMA_DESCR_COUNT
] __aligned(64);
246 struct list_head bounce_buffers
;
247 /* Enough maps for all out/in buffers, and all three descr. arrays */
248 struct artpec6_crypto_dma_map maps
[PDMA_DESCR_COUNT
* 2 + 2];
249 dma_addr_t out_dma_addr
;
250 dma_addr_t in_dma_addr
;
251 dma_addr_t stat_dma_addr
;
257 enum artpec6_crypto_variant
{
262 struct artpec6_crypto
{
264 spinlock_t queue_lock
;
265 struct list_head queue
; /* waiting for pdma fifo space */
266 struct list_head pending
; /* submitted to pdma fifo */
267 struct tasklet_struct task
;
268 struct kmem_cache
*dma_cache
;
270 struct timer_list timer
;
271 enum artpec6_crypto_variant variant
;
272 void *pad_buffer
; /* cache-aligned block padding buffer */
276 enum artpec6_crypto_hash_flags
{
277 HASH_FLAG_INIT_CTX
= 2,
278 HASH_FLAG_UPDATE
= 4,
279 HASH_FLAG_FINALIZE
= 8,
281 HASH_FLAG_UPDATE_KEY
= 32,
284 struct artpec6_crypto_req_common
{
285 struct list_head list
;
286 struct artpec6_crypto_dma_descriptors
*dma
;
287 struct crypto_async_request
*req
;
288 void (*complete
)(struct crypto_async_request
*req
);
292 struct artpec6_hash_request_context
{
293 char partial_buffer
[SHA512_BLOCK_SIZE
];
294 char partial_buffer_out
[SHA512_BLOCK_SIZE
];
295 char key_buffer
[SHA512_BLOCK_SIZE
];
296 char pad_buffer
[SHA512_BLOCK_SIZE
+ 32];
297 unsigned char digeststate
[SHA512_DIGEST_SIZE
];
298 size_t partial_bytes
;
302 enum artpec6_crypto_hash_flags hash_flags
;
303 struct artpec6_crypto_req_common common
;
306 struct artpec6_hash_export_state
{
307 char partial_buffer
[SHA512_BLOCK_SIZE
];
308 unsigned char digeststate
[SHA512_DIGEST_SIZE
];
309 size_t partial_bytes
;
312 unsigned int hash_flags
;
315 struct artpec6_hashalg_context
{
316 char hmac_key
[SHA512_BLOCK_SIZE
];
317 size_t hmac_key_length
;
318 struct crypto_shash
*child_hash
;
321 struct artpec6_crypto_request_context
{
324 struct artpec6_crypto_req_common common
;
327 struct artpec6_cryptotfm_context
{
328 unsigned char aes_key
[2*AES_MAX_KEY_SIZE
];
332 struct crypto_skcipher
*fallback
;
335 struct artpec6_crypto_aead_hw_ctx
{
336 __be64 aad_length_bits
;
337 __be64 text_length_bits
;
338 __u8 J0
[AES_BLOCK_SIZE
];
341 struct artpec6_crypto_aead_req_ctx
{
342 struct artpec6_crypto_aead_hw_ctx hw_ctx
;
345 struct artpec6_crypto_req_common common
;
346 __u8 decryption_tag
[AES_BLOCK_SIZE
] ____cacheline_aligned
;
349 /* The crypto framework makes it hard to avoid this global. */
350 static struct device
*artpec6_crypto_dev
;
352 #ifdef CONFIG_FAULT_INJECTION
353 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read
);
354 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full
);
358 ARTPEC6_CRYPTO_PREPARE_HASH_NO_START
,
359 ARTPEC6_CRYPTO_PREPARE_HASH_START
,
362 static int artpec6_crypto_prepare_aead(struct aead_request
*areq
);
363 static int artpec6_crypto_prepare_crypto(struct skcipher_request
*areq
);
364 static int artpec6_crypto_prepare_hash(struct ahash_request
*areq
);
367 artpec6_crypto_complete_crypto(struct crypto_async_request
*req
);
369 artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request
*req
);
371 artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request
*req
);
373 artpec6_crypto_complete_aead(struct crypto_async_request
*req
);
375 artpec6_crypto_complete_hash(struct crypto_async_request
*req
);
378 artpec6_crypto_common_destroy(struct artpec6_crypto_req_common
*common
);
381 artpec6_crypto_start_dma(struct artpec6_crypto_req_common
*common
);
383 struct artpec6_crypto_walk
{
384 struct scatterlist
*sg
;
388 static void artpec6_crypto_walk_init(struct artpec6_crypto_walk
*awalk
,
389 struct scatterlist
*sg
)
395 static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk
*awalk
,
398 while (nbytes
&& awalk
->sg
) {
401 WARN_ON(awalk
->offset
> awalk
->sg
->length
);
403 piece
= min(nbytes
, (size_t)awalk
->sg
->length
- awalk
->offset
);
405 awalk
->offset
+= piece
;
406 if (awalk
->offset
== awalk
->sg
->length
) {
407 awalk
->sg
= sg_next(awalk
->sg
);
417 artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk
*awalk
)
419 WARN_ON(awalk
->sg
->length
== awalk
->offset
);
421 return awalk
->sg
->length
- awalk
->offset
;
425 artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk
*awalk
)
427 return sg_phys(awalk
->sg
) + awalk
->offset
;
431 artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common
*common
)
433 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
434 struct artpec6_crypto_bounce_buffer
*b
;
435 struct artpec6_crypto_bounce_buffer
*next
;
437 list_for_each_entry_safe(b
, next
, &dma
->bounce_buffers
, list
) {
438 pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
439 b
, b
->length
, b
->offset
, b
->buf
);
440 sg_pcopy_from_buffer(b
->sg
,
451 static inline bool artpec6_crypto_busy(void)
453 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
454 int fifo_count
= ac
->pending_count
;
456 return fifo_count
> 6;
459 static int artpec6_crypto_submit(struct artpec6_crypto_req_common
*req
)
461 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
464 spin_lock_bh(&ac
->queue_lock
);
466 if (!artpec6_crypto_busy()) {
467 list_add_tail(&req
->list
, &ac
->pending
);
468 artpec6_crypto_start_dma(req
);
470 } else if (req
->req
->flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
) {
471 list_add_tail(&req
->list
, &ac
->queue
);
473 artpec6_crypto_common_destroy(req
);
476 spin_unlock_bh(&ac
->queue_lock
);
481 static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common
*common
)
483 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
484 enum artpec6_crypto_variant variant
= ac
->variant
;
485 void __iomem
*base
= ac
->base
;
486 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
487 u32 ind
, statd
, outd
;
489 /* Make descriptor content visible to the DMA before starting it. */
492 ind
= FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN
, dma
->in_cnt
- 1) |
493 FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR
, dma
->in_dma_addr
>> 6);
495 statd
= FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN
, dma
->in_cnt
- 1) |
496 FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR
, dma
->stat_dma_addr
>> 6);
498 outd
= FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN
, dma
->out_cnt
- 1) |
499 FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR
, dma
->out_dma_addr
>> 6);
501 if (variant
== ARTPEC6_CRYPTO
) {
502 writel_relaxed(ind
, base
+ A6_PDMA_IN_DESCRQ_PUSH
);
503 writel_relaxed(statd
, base
+ A6_PDMA_IN_STATQ_PUSH
);
504 writel_relaxed(PDMA_IN_CMD_START
, base
+ A6_PDMA_IN_CMD
);
506 writel_relaxed(ind
, base
+ A7_PDMA_IN_DESCRQ_PUSH
);
507 writel_relaxed(statd
, base
+ A7_PDMA_IN_STATQ_PUSH
);
508 writel_relaxed(PDMA_IN_CMD_START
, base
+ A7_PDMA_IN_CMD
);
511 writel_relaxed(outd
, base
+ PDMA_OUT_DESCRQ_PUSH
);
512 writel_relaxed(PDMA_OUT_CMD_START
, base
+ PDMA_OUT_CMD
);
518 artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common
*common
)
520 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
525 INIT_LIST_HEAD(&dma
->bounce_buffers
);
528 static bool fault_inject_dma_descr(void)
530 #ifdef CONFIG_FAULT_INJECTION
531 return should_fail(&artpec6_crypto_fail_dma_array_full
, 1);
537 /** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
540 * @addr: The physical address of the data buffer
541 * @len: The length of the data buffer
542 * @eop: True if this is the last buffer in the packet
544 * @return 0 on success or -ENOSPC if there are no more descriptors available
547 artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common
*common
,
548 dma_addr_t addr
, size_t len
, bool eop
)
550 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
551 struct pdma_descr
*d
;
553 if (dma
->out_cnt
>= PDMA_DESCR_COUNT
||
554 fault_inject_dma_descr()) {
555 pr_err("No free OUT DMA descriptors available!\n");
559 d
= &dma
->out
[dma
->out_cnt
++];
560 memset(d
, 0, sizeof(*d
));
562 d
->ctrl
.short_descr
= 0;
569 /** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
571 * @dst: The virtual address of the data
572 * @len: The length of the data, must be between 1 to 7 bytes
573 * @eop: True if this is the last buffer in the packet
575 * @return 0 on success
576 * -ENOSPC if no more descriptors are available
577 * -EINVAL if the data length exceeds 7 bytes
580 artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common
*common
,
581 void *dst
, unsigned int len
, bool eop
)
583 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
584 struct pdma_descr
*d
;
586 if (dma
->out_cnt
>= PDMA_DESCR_COUNT
||
587 fault_inject_dma_descr()) {
588 pr_err("No free OUT DMA descriptors available!\n");
590 } else if (len
> 7 || len
< 1) {
593 d
= &dma
->out
[dma
->out_cnt
++];
594 memset(d
, 0, sizeof(*d
));
596 d
->ctrl
.short_descr
= 1;
597 d
->ctrl
.short_len
= len
;
599 memcpy(d
->shrt
.data
, dst
, len
);
603 static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common
*common
,
604 struct page
*page
, size_t offset
,
606 enum dma_data_direction dir
,
607 dma_addr_t
*dma_addr_out
)
609 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
610 struct device
*dev
= artpec6_crypto_dev
;
611 struct artpec6_crypto_dma_map
*map
;
616 if (dma
->map_count
>= ARRAY_SIZE(dma
->maps
))
619 dma_addr
= dma_map_page(dev
, page
, offset
, size
, dir
);
620 if (dma_mapping_error(dev
, dma_addr
))
623 map
= &dma
->maps
[dma
->map_count
++];
625 map
->dma_addr
= dma_addr
;
628 *dma_addr_out
= dma_addr
;
634 artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common
*common
,
635 void *ptr
, size_t size
,
636 enum dma_data_direction dir
,
637 dma_addr_t
*dma_addr_out
)
639 struct page
*page
= virt_to_page(ptr
);
640 size_t offset
= (uintptr_t)ptr
& ~PAGE_MASK
;
642 return artpec6_crypto_dma_map_page(common
, page
, offset
, size
, dir
,
647 artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common
*common
)
649 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
652 ret
= artpec6_crypto_dma_map_single(common
, dma
->in
,
653 sizeof(dma
->in
[0]) * dma
->in_cnt
,
654 DMA_TO_DEVICE
, &dma
->in_dma_addr
);
658 ret
= artpec6_crypto_dma_map_single(common
, dma
->out
,
659 sizeof(dma
->out
[0]) * dma
->out_cnt
,
660 DMA_TO_DEVICE
, &dma
->out_dma_addr
);
664 /* We only read one stat descriptor */
665 dma
->stat
[dma
->in_cnt
- 1] = 0;
668 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
671 return artpec6_crypto_dma_map_single(common
,
672 dma
->stat
+ dma
->in_cnt
- 1,
673 sizeof(dma
->stat
[0]),
675 &dma
->stat_dma_addr
);
679 artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common
*common
)
681 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
682 struct device
*dev
= artpec6_crypto_dev
;
685 for (i
= 0; i
< dma
->map_count
; i
++) {
686 struct artpec6_crypto_dma_map
*map
= &dma
->maps
[i
];
688 dma_unmap_page(dev
, map
->dma_addr
, map
->size
, map
->dir
);
694 /** artpec6_crypto_setup_out_descr - Setup an out descriptor
696 * @dst: The virtual address of the data
697 * @len: The length of the data
698 * @eop: True if this is the last buffer in the packet
699 * @use_short: If this is true and the data length is 7 bytes or less then
700 * a short descriptor will be used
702 * @return 0 on success
703 * Any errors from artpec6_crypto_setup_out_descr_short() or
704 * setup_out_descr_phys()
707 artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common
*common
,
708 void *dst
, unsigned int len
, bool eop
,
711 if (use_short
&& len
< 7) {
712 return artpec6_crypto_setup_out_descr_short(common
, dst
, len
,
718 ret
= artpec6_crypto_dma_map_single(common
, dst
, len
,
724 return artpec6_crypto_setup_out_descr_phys(common
, dma_addr
,
729 /** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
732 * @addr: The physical address of the data buffer
733 * @len: The length of the data buffer
734 * @intr: True if an interrupt should be fired after HW processing of this
739 artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common
*common
,
740 dma_addr_t addr
, unsigned int len
, bool intr
)
742 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
743 struct pdma_descr
*d
;
745 if (dma
->in_cnt
>= PDMA_DESCR_COUNT
||
746 fault_inject_dma_descr()) {
747 pr_err("No free IN DMA descriptors available!\n");
750 d
= &dma
->in
[dma
->in_cnt
++];
751 memset(d
, 0, sizeof(*d
));
759 /** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
761 * @buffer: The virtual address to of the data buffer
762 * @len: The length of the data buffer
763 * @last: If this is the last data buffer in the request (i.e. an interrupt
766 * Short descriptors are not used for the in channel
769 artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common
*common
,
770 void *buffer
, unsigned int len
, bool last
)
775 ret
= artpec6_crypto_dma_map_single(common
, buffer
, len
,
776 DMA_FROM_DEVICE
, &dma_addr
);
780 return artpec6_crypto_setup_in_descr_phys(common
, dma_addr
, len
, last
);
783 static struct artpec6_crypto_bounce_buffer
*
784 artpec6_crypto_alloc_bounce(gfp_t flags
)
787 size_t alloc_size
= sizeof(struct artpec6_crypto_bounce_buffer
) +
788 2 * ARTPEC_CACHE_LINE_MAX
;
789 struct artpec6_crypto_bounce_buffer
*bbuf
= kzalloc(alloc_size
, flags
);
795 bbuf
->buf
= PTR_ALIGN(base
, ARTPEC_CACHE_LINE_MAX
);
799 static int setup_bounce_buffer_in(struct artpec6_crypto_req_common
*common
,
800 struct artpec6_crypto_walk
*walk
, size_t size
)
802 struct artpec6_crypto_bounce_buffer
*bbuf
;
805 bbuf
= artpec6_crypto_alloc_bounce(common
->gfp_flags
);
811 bbuf
->offset
= walk
->offset
;
813 ret
= artpec6_crypto_setup_in_descr(common
, bbuf
->buf
, size
, false);
819 pr_debug("BOUNCE %zu offset %zu\n", size
, walk
->offset
);
820 list_add_tail(&bbuf
->list
, &common
->dma
->bounce_buffers
);
825 artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common
*common
,
826 struct artpec6_crypto_walk
*walk
,
833 while (walk
->sg
&& count
) {
834 chunk
= min(count
, artpec6_crypto_walk_chunklen(walk
));
835 addr
= artpec6_crypto_walk_chunk_phys(walk
);
837 /* When destination buffers are not aligned to the cache line
838 * size we need bounce buffers. The DMA-API requires that the
839 * entire line is owned by the DMA buffer and this holds also
840 * for the case when coherent DMA is used.
842 if (!IS_ALIGNED(addr
, ARTPEC_CACHE_LINE_MAX
)) {
843 chunk
= min_t(dma_addr_t
, chunk
,
844 ALIGN(addr
, ARTPEC_CACHE_LINE_MAX
) -
847 pr_debug("CHUNK-b %pad:%zu\n", &addr
, chunk
);
848 ret
= setup_bounce_buffer_in(common
, walk
, chunk
);
849 } else if (chunk
< ARTPEC_CACHE_LINE_MAX
) {
850 pr_debug("CHUNK-b %pad:%zu\n", &addr
, chunk
);
851 ret
= setup_bounce_buffer_in(common
, walk
, chunk
);
855 chunk
= chunk
& ~(ARTPEC_CACHE_LINE_MAX
-1);
857 pr_debug("CHUNK %pad:%zu\n", &addr
, chunk
);
859 ret
= artpec6_crypto_dma_map_page(common
,
869 ret
= artpec6_crypto_setup_in_descr_phys(common
,
877 count
= count
- chunk
;
878 artpec6_crypto_walk_advance(walk
, chunk
);
882 pr_err("EOL unexpected %zu bytes left\n", count
);
884 return count
? -EINVAL
: 0;
888 artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common
*common
,
889 struct artpec6_crypto_walk
*walk
,
896 while (walk
->sg
&& count
) {
897 chunk
= min(count
, artpec6_crypto_walk_chunklen(walk
));
898 addr
= artpec6_crypto_walk_chunk_phys(walk
);
900 pr_debug("OUT-CHUNK %pad:%zu\n", &addr
, chunk
);
905 chunk
= min_t(size_t, chunk
, (4-(addr
&3)));
907 sg_pcopy_to_buffer(walk
->sg
, 1, buf
, chunk
,
910 ret
= artpec6_crypto_setup_out_descr_short(common
, buf
,
916 ret
= artpec6_crypto_dma_map_page(common
,
926 ret
= artpec6_crypto_setup_out_descr_phys(common
,
934 count
= count
- chunk
;
935 artpec6_crypto_walk_advance(walk
, chunk
);
939 pr_err("EOL unexpected %zu bytes left\n", count
);
941 return count
? -EINVAL
: 0;
945 /** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
947 * If the out descriptor list is non-empty, then the eop flag on the
948 * last used out descriptor will be set.
950 * @return 0 on success
951 * -EINVAL if the out descriptor is empty or has overflown
954 artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common
*common
)
956 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
957 struct pdma_descr
*d
;
959 if (!dma
->out_cnt
|| dma
->out_cnt
> PDMA_DESCR_COUNT
) {
960 pr_err("%s: OUT descriptor list is %s\n",
961 MODULE_NAME
, dma
->out_cnt
? "empty" : "full");
966 d
= &dma
->out
[dma
->out_cnt
-1];
972 /** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
975 * See artpec6_crypto_terminate_out_descrs() for return values
978 artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common
*common
)
980 struct artpec6_crypto_dma_descriptors
*dma
= common
->dma
;
981 struct pdma_descr
*d
;
983 if (!dma
->in_cnt
|| dma
->in_cnt
> PDMA_DESCR_COUNT
) {
984 pr_err("%s: IN descriptor list is %s\n",
985 MODULE_NAME
, dma
->in_cnt
? "empty" : "full");
989 d
= &dma
->in
[dma
->in_cnt
-1];
994 /** create_hash_pad - Create a Secure Hash conformant pad
996 * @dst: The destination buffer to write the pad. Must be at least 64 bytes
997 * @dgstlen: The total length of the hash digest in bytes
998 * @bitcount: The total length of the digest in bits
1000 * @return The total number of padding bytes written to @dst
1003 create_hash_pad(int oper
, unsigned char *dst
, u64 dgstlen
, u64 bitcount
)
1005 unsigned int mod
, target
, diff
, pad_bytes
, size_bytes
;
1006 __be64 bits
= __cpu_to_be64(bitcount
);
1009 case regk_crypto_sha1
:
1010 case regk_crypto_sha256
:
1011 case regk_crypto_hmac_sha1
:
1012 case regk_crypto_hmac_sha256
:
1025 diff
= dgstlen
& (mod
- 1);
1026 pad_bytes
= diff
> target
? target
+ mod
- diff
: target
- diff
;
1028 memset(dst
+ 1, 0, pad_bytes
);
1031 if (size_bytes
== 16) {
1032 memset(dst
+ 1 + pad_bytes
, 0, 8);
1033 memcpy(dst
+ 1 + pad_bytes
+ 8, &bits
, 8);
1035 memcpy(dst
+ 1 + pad_bytes
, &bits
, 8);
1038 return pad_bytes
+ size_bytes
+ 1;
1041 static int artpec6_crypto_common_init(struct artpec6_crypto_req_common
*common
,
1042 struct crypto_async_request
*parent
,
1043 void (*complete
)(struct crypto_async_request
*req
),
1044 struct scatterlist
*dstsg
, unsigned int nbytes
)
1047 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1049 flags
= (parent
->flags
& CRYPTO_TFM_REQ_MAY_SLEEP
) ?
1050 GFP_KERNEL
: GFP_ATOMIC
;
1052 common
->gfp_flags
= flags
;
1053 common
->dma
= kmem_cache_alloc(ac
->dma_cache
, flags
);
1057 common
->req
= parent
;
1058 common
->complete
= complete
;
1063 artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors
*dma
)
1065 struct artpec6_crypto_bounce_buffer
*b
;
1066 struct artpec6_crypto_bounce_buffer
*next
;
1068 list_for_each_entry_safe(b
, next
, &dma
->bounce_buffers
, list
) {
1074 artpec6_crypto_common_destroy(struct artpec6_crypto_req_common
*common
)
1076 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1078 artpec6_crypto_dma_unmap_all(common
);
1079 artpec6_crypto_bounce_destroy(common
->dma
);
1080 kmem_cache_free(ac
->dma_cache
, common
->dma
);
1086 * Ciphering functions.
1088 static int artpec6_crypto_encrypt(struct skcipher_request
*req
)
1090 struct crypto_skcipher
*cipher
= crypto_skcipher_reqtfm(req
);
1091 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(cipher
);
1092 struct artpec6_crypto_request_context
*req_ctx
= NULL
;
1093 void (*complete
)(struct crypto_async_request
*req
);
1096 req_ctx
= skcipher_request_ctx(req
);
1098 switch (ctx
->crypto_type
) {
1099 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1100 case ARTPEC6_CRYPTO_CIPHER_AES_ECB
:
1101 case ARTPEC6_CRYPTO_CIPHER_AES_XTS
:
1102 req_ctx
->decrypt
= 0;
1108 switch (ctx
->crypto_type
) {
1109 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1110 complete
= artpec6_crypto_complete_cbc_encrypt
;
1113 complete
= artpec6_crypto_complete_crypto
;
1117 ret
= artpec6_crypto_common_init(&req_ctx
->common
,
1120 req
->dst
, req
->cryptlen
);
1124 ret
= artpec6_crypto_prepare_crypto(req
);
1126 artpec6_crypto_common_destroy(&req_ctx
->common
);
1130 return artpec6_crypto_submit(&req_ctx
->common
);
1133 static int artpec6_crypto_decrypt(struct skcipher_request
*req
)
1136 struct crypto_skcipher
*cipher
= crypto_skcipher_reqtfm(req
);
1137 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(cipher
);
1138 struct artpec6_crypto_request_context
*req_ctx
= NULL
;
1139 void (*complete
)(struct crypto_async_request
*req
);
1141 req_ctx
= skcipher_request_ctx(req
);
1143 switch (ctx
->crypto_type
) {
1144 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1145 case ARTPEC6_CRYPTO_CIPHER_AES_ECB
:
1146 case ARTPEC6_CRYPTO_CIPHER_AES_XTS
:
1147 req_ctx
->decrypt
= 1;
1154 switch (ctx
->crypto_type
) {
1155 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1156 complete
= artpec6_crypto_complete_cbc_decrypt
;
1159 complete
= artpec6_crypto_complete_crypto
;
1163 ret
= artpec6_crypto_common_init(&req_ctx
->common
, &req
->base
,
1165 req
->dst
, req
->cryptlen
);
1169 ret
= artpec6_crypto_prepare_crypto(req
);
1171 artpec6_crypto_common_destroy(&req_ctx
->common
);
1175 return artpec6_crypto_submit(&req_ctx
->common
);
1179 artpec6_crypto_ctr_crypt(struct skcipher_request
*req
, bool encrypt
)
1181 struct crypto_skcipher
*cipher
= crypto_skcipher_reqtfm(req
);
1182 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(cipher
);
1183 size_t iv_len
= crypto_skcipher_ivsize(cipher
);
1184 unsigned int counter
= be32_to_cpup((__be32
*)
1185 (req
->iv
+ iv_len
- 4));
1186 unsigned int nblks
= ALIGN(req
->cryptlen
, AES_BLOCK_SIZE
) /
1190 * The hardware uses only the last 32-bits as the counter while the
1191 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
1192 * the whole IV is a counter. So fallback if the counter is going to
1195 if (counter
+ nblks
< counter
) {
1198 pr_debug("counter %x will overflow (nblks %u), falling back\n",
1199 counter
, counter
+ nblks
);
1201 ret
= crypto_skcipher_setkey(ctx
->fallback
, ctx
->aes_key
,
1207 SKCIPHER_REQUEST_ON_STACK(subreq
, ctx
->fallback
);
1209 skcipher_request_set_tfm(subreq
, ctx
->fallback
);
1210 skcipher_request_set_callback(subreq
, req
->base
.flags
,
1212 skcipher_request_set_crypt(subreq
, req
->src
, req
->dst
,
1213 req
->cryptlen
, req
->iv
);
1214 ret
= encrypt
? crypto_skcipher_encrypt(subreq
)
1215 : crypto_skcipher_decrypt(subreq
);
1216 skcipher_request_zero(subreq
);
1221 return encrypt
? artpec6_crypto_encrypt(req
)
1222 : artpec6_crypto_decrypt(req
);
1225 static int artpec6_crypto_ctr_encrypt(struct skcipher_request
*req
)
1227 return artpec6_crypto_ctr_crypt(req
, true);
1230 static int artpec6_crypto_ctr_decrypt(struct skcipher_request
*req
)
1232 return artpec6_crypto_ctr_crypt(req
, false);
1238 static int artpec6_crypto_aead_init(struct crypto_aead
*tfm
)
1240 struct artpec6_cryptotfm_context
*tfm_ctx
= crypto_aead_ctx(tfm
);
1242 memset(tfm_ctx
, 0, sizeof(*tfm_ctx
));
1244 crypto_aead_set_reqsize(tfm
,
1245 sizeof(struct artpec6_crypto_aead_req_ctx
));
1250 static int artpec6_crypto_aead_set_key(struct crypto_aead
*tfm
, const u8
*key
,
1253 struct artpec6_cryptotfm_context
*ctx
= crypto_tfm_ctx(&tfm
->base
);
1255 if (len
!= 16 && len
!= 24 && len
!= 32) {
1256 crypto_aead_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
1260 ctx
->key_length
= len
;
1262 memcpy(ctx
->aes_key
, key
, len
);
1266 static int artpec6_crypto_aead_encrypt(struct aead_request
*req
)
1269 struct artpec6_crypto_aead_req_ctx
*req_ctx
= aead_request_ctx(req
);
1271 req_ctx
->decrypt
= false;
1272 ret
= artpec6_crypto_common_init(&req_ctx
->common
, &req
->base
,
1273 artpec6_crypto_complete_aead
,
1278 ret
= artpec6_crypto_prepare_aead(req
);
1280 artpec6_crypto_common_destroy(&req_ctx
->common
);
1284 return artpec6_crypto_submit(&req_ctx
->common
);
1287 static int artpec6_crypto_aead_decrypt(struct aead_request
*req
)
1290 struct artpec6_crypto_aead_req_ctx
*req_ctx
= aead_request_ctx(req
);
1292 req_ctx
->decrypt
= true;
1293 if (req
->cryptlen
< AES_BLOCK_SIZE
)
1296 ret
= artpec6_crypto_common_init(&req_ctx
->common
,
1298 artpec6_crypto_complete_aead
,
1303 ret
= artpec6_crypto_prepare_aead(req
);
1305 artpec6_crypto_common_destroy(&req_ctx
->common
);
1309 return artpec6_crypto_submit(&req_ctx
->common
);
1312 static int artpec6_crypto_prepare_hash(struct ahash_request
*areq
)
1314 struct artpec6_hashalg_context
*ctx
= crypto_tfm_ctx(areq
->base
.tfm
);
1315 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(areq
);
1316 size_t digestsize
= crypto_ahash_digestsize(crypto_ahash_reqtfm(areq
));
1317 size_t contextsize
= digestsize
== SHA384_DIGEST_SIZE
?
1318 SHA512_DIGEST_SIZE
: digestsize
;
1319 size_t blocksize
= crypto_tfm_alg_blocksize(
1320 crypto_ahash_tfm(crypto_ahash_reqtfm(areq
)));
1321 struct artpec6_crypto_req_common
*common
= &req_ctx
->common
;
1322 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1323 enum artpec6_crypto_variant variant
= ac
->variant
;
1325 bool ext_ctx
= false;
1326 bool run_hw
= false;
1329 artpec6_crypto_init_dma_operation(common
);
1331 /* Upload HMAC key, must be first the first packet */
1332 if (req_ctx
->hash_flags
& HASH_FLAG_HMAC
) {
1333 if (variant
== ARTPEC6_CRYPTO
) {
1334 req_ctx
->key_md
= FIELD_PREP(A6_CRY_MD_OPER
,
1335 a6_regk_crypto_dlkey
);
1337 req_ctx
->key_md
= FIELD_PREP(A7_CRY_MD_OPER
,
1338 a7_regk_crypto_dlkey
);
1341 /* Copy and pad up the key */
1342 memcpy(req_ctx
->key_buffer
, ctx
->hmac_key
,
1343 ctx
->hmac_key_length
);
1344 memset(req_ctx
->key_buffer
+ ctx
->hmac_key_length
, 0,
1345 blocksize
- ctx
->hmac_key_length
);
1347 error
= artpec6_crypto_setup_out_descr(common
,
1348 (void *)&req_ctx
->key_md
,
1349 sizeof(req_ctx
->key_md
), false, false);
1353 error
= artpec6_crypto_setup_out_descr(common
,
1354 req_ctx
->key_buffer
, blocksize
,
1360 if (!(req_ctx
->hash_flags
& HASH_FLAG_INIT_CTX
)) {
1361 /* Restore context */
1362 sel_ctx
= regk_crypto_ext
;
1365 sel_ctx
= regk_crypto_init
;
1368 if (variant
== ARTPEC6_CRYPTO
) {
1369 req_ctx
->hash_md
&= ~A6_CRY_MD_HASH_SEL_CTX
;
1370 req_ctx
->hash_md
|= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX
, sel_ctx
);
1372 /* If this is the final round, set the final flag */
1373 if (req_ctx
->hash_flags
& HASH_FLAG_FINALIZE
)
1374 req_ctx
->hash_md
|= A6_CRY_MD_HASH_HMAC_FIN
;
1376 req_ctx
->hash_md
&= ~A7_CRY_MD_HASH_SEL_CTX
;
1377 req_ctx
->hash_md
|= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX
, sel_ctx
);
1379 /* If this is the final round, set the final flag */
1380 if (req_ctx
->hash_flags
& HASH_FLAG_FINALIZE
)
1381 req_ctx
->hash_md
|= A7_CRY_MD_HASH_HMAC_FIN
;
1384 /* Setup up metadata descriptors */
1385 error
= artpec6_crypto_setup_out_descr(common
,
1386 (void *)&req_ctx
->hash_md
,
1387 sizeof(req_ctx
->hash_md
), false, false);
1391 error
= artpec6_crypto_setup_in_descr(common
, ac
->pad_buffer
, 4, false);
1396 error
= artpec6_crypto_setup_out_descr(common
,
1397 req_ctx
->digeststate
,
1398 contextsize
, false, false);
1404 if (req_ctx
->hash_flags
& HASH_FLAG_UPDATE
) {
1405 size_t done_bytes
= 0;
1406 size_t total_bytes
= areq
->nbytes
+ req_ctx
->partial_bytes
;
1407 size_t ready_bytes
= round_down(total_bytes
, blocksize
);
1408 struct artpec6_crypto_walk walk
;
1410 run_hw
= ready_bytes
> 0;
1411 if (req_ctx
->partial_bytes
&& ready_bytes
) {
1412 /* We have a partial buffer and will at least some bytes
1413 * to the HW. Empty this partial buffer before tackling
1416 memcpy(req_ctx
->partial_buffer_out
,
1417 req_ctx
->partial_buffer
,
1418 req_ctx
->partial_bytes
);
1420 error
= artpec6_crypto_setup_out_descr(common
,
1421 req_ctx
->partial_buffer_out
,
1422 req_ctx
->partial_bytes
,
1427 /* Reset partial buffer */
1428 done_bytes
+= req_ctx
->partial_bytes
;
1429 req_ctx
->partial_bytes
= 0;
1432 artpec6_crypto_walk_init(&walk
, areq
->src
);
1434 error
= artpec6_crypto_setup_sg_descrs_out(common
, &walk
,
1441 size_t sg_skip
= ready_bytes
- done_bytes
;
1442 size_t sg_rem
= areq
->nbytes
- sg_skip
;
1444 sg_pcopy_to_buffer(areq
->src
, sg_nents(areq
->src
),
1445 req_ctx
->partial_buffer
+
1446 req_ctx
->partial_bytes
,
1449 req_ctx
->partial_bytes
+= sg_rem
;
1452 req_ctx
->digcnt
+= ready_bytes
;
1453 req_ctx
->hash_flags
&= ~(HASH_FLAG_UPDATE
);
1457 if (req_ctx
->hash_flags
& HASH_FLAG_FINALIZE
) {
1458 bool needtrim
= contextsize
!= digestsize
;
1459 size_t hash_pad_len
;
1463 if (variant
== ARTPEC6_CRYPTO
)
1464 oper
= FIELD_GET(A6_CRY_MD_OPER
, req_ctx
->hash_md
);
1466 oper
= FIELD_GET(A7_CRY_MD_OPER
, req_ctx
->hash_md
);
1468 /* Write out the partial buffer if present */
1469 if (req_ctx
->partial_bytes
) {
1470 memcpy(req_ctx
->partial_buffer_out
,
1471 req_ctx
->partial_buffer
,
1472 req_ctx
->partial_bytes
);
1473 error
= artpec6_crypto_setup_out_descr(common
,
1474 req_ctx
->partial_buffer_out
,
1475 req_ctx
->partial_bytes
,
1480 req_ctx
->digcnt
+= req_ctx
->partial_bytes
;
1481 req_ctx
->partial_bytes
= 0;
1484 if (req_ctx
->hash_flags
& HASH_FLAG_HMAC
)
1485 digest_bits
= 8 * (req_ctx
->digcnt
+ blocksize
);
1487 digest_bits
= 8 * req_ctx
->digcnt
;
1489 /* Add the hash pad */
1490 hash_pad_len
= create_hash_pad(oper
, req_ctx
->pad_buffer
,
1491 req_ctx
->digcnt
, digest_bits
);
1492 error
= artpec6_crypto_setup_out_descr(common
,
1493 req_ctx
->pad_buffer
,
1494 hash_pad_len
, false,
1496 req_ctx
->digcnt
= 0;
1501 /* Descriptor for the final result */
1502 error
= artpec6_crypto_setup_in_descr(common
, areq
->result
,
1509 /* Discard the extra context bytes for SHA-384 */
1510 error
= artpec6_crypto_setup_in_descr(common
,
1511 req_ctx
->partial_buffer
,
1512 digestsize
- contextsize
, true);
1517 } else { /* This is not the final operation for this request */
1519 return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START
;
1521 /* Save the result to the context */
1522 error
= artpec6_crypto_setup_in_descr(common
,
1523 req_ctx
->digeststate
,
1524 contextsize
, false);
1530 req_ctx
->hash_flags
&= ~(HASH_FLAG_INIT_CTX
| HASH_FLAG_UPDATE
|
1531 HASH_FLAG_FINALIZE
);
1533 error
= artpec6_crypto_terminate_in_descrs(common
);
1537 error
= artpec6_crypto_terminate_out_descrs(common
);
1541 error
= artpec6_crypto_dma_map_descs(common
);
1545 return ARTPEC6_CRYPTO_PREPARE_HASH_START
;
1549 static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher
*tfm
)
1551 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1553 tfm
->reqsize
= sizeof(struct artpec6_crypto_request_context
);
1554 ctx
->crypto_type
= ARTPEC6_CRYPTO_CIPHER_AES_ECB
;
1559 static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher
*tfm
)
1561 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1563 ctx
->fallback
= crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm
->base
),
1566 CRYPTO_ALG_NEED_FALLBACK
);
1567 if (IS_ERR(ctx
->fallback
))
1568 return PTR_ERR(ctx
->fallback
);
1570 tfm
->reqsize
= sizeof(struct artpec6_crypto_request_context
);
1571 ctx
->crypto_type
= ARTPEC6_CRYPTO_CIPHER_AES_CTR
;
1576 static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher
*tfm
)
1578 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1580 tfm
->reqsize
= sizeof(struct artpec6_crypto_request_context
);
1581 ctx
->crypto_type
= ARTPEC6_CRYPTO_CIPHER_AES_CBC
;
1586 static int artpec6_crypto_aes_xts_init(struct crypto_skcipher
*tfm
)
1588 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1590 tfm
->reqsize
= sizeof(struct artpec6_crypto_request_context
);
1591 ctx
->crypto_type
= ARTPEC6_CRYPTO_CIPHER_AES_XTS
;
1596 static void artpec6_crypto_aes_exit(struct crypto_skcipher
*tfm
)
1598 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1600 memset(ctx
, 0, sizeof(*ctx
));
1603 static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher
*tfm
)
1605 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(tfm
);
1607 crypto_free_skcipher(ctx
->fallback
);
1608 artpec6_crypto_aes_exit(tfm
);
1612 artpec6_crypto_cipher_set_key(struct crypto_skcipher
*cipher
, const u8
*key
,
1613 unsigned int keylen
)
1615 struct artpec6_cryptotfm_context
*ctx
=
1616 crypto_skcipher_ctx(cipher
);
1624 crypto_skcipher_set_flags(cipher
,
1625 CRYPTO_TFM_RES_BAD_KEY_LEN
);
1629 memcpy(ctx
->aes_key
, key
, keylen
);
1630 ctx
->key_length
= keylen
;
1635 artpec6_crypto_xts_set_key(struct crypto_skcipher
*cipher
, const u8
*key
,
1636 unsigned int keylen
)
1638 struct artpec6_cryptotfm_context
*ctx
=
1639 crypto_skcipher_ctx(cipher
);
1642 ret
= xts_check_key(&cipher
->base
, key
, keylen
);
1652 crypto_skcipher_set_flags(cipher
,
1653 CRYPTO_TFM_RES_BAD_KEY_LEN
);
1657 memcpy(ctx
->aes_key
, key
, keylen
);
1658 ctx
->key_length
= keylen
;
1662 /** artpec6_crypto_process_crypto - Prepare an async block cipher crypto request
1664 * @req: The async request to process
1666 * @return 0 if the dma job was successfully prepared
1669 * This function sets up the PDMA descriptors for a block cipher request.
1671 * The required padding is added for AES-CTR using a statically defined
1674 * The PDMA descriptor list will be as follows:
1676 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
1677 * IN: <CIPHER_MD><data_0>...[data_n]<intr>
1680 static int artpec6_crypto_prepare_crypto(struct skcipher_request
*areq
)
1683 struct artpec6_crypto_walk walk
;
1684 struct crypto_skcipher
*cipher
= crypto_skcipher_reqtfm(areq
);
1685 struct artpec6_cryptotfm_context
*ctx
= crypto_skcipher_ctx(cipher
);
1686 struct artpec6_crypto_request_context
*req_ctx
= NULL
;
1687 size_t iv_len
= crypto_skcipher_ivsize(cipher
);
1688 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1689 enum artpec6_crypto_variant variant
= ac
->variant
;
1690 struct artpec6_crypto_req_common
*common
;
1691 bool cipher_decr
= false;
1693 u32 cipher_len
= 0; /* Same as regk_crypto_key_128 for NULL crypto */
1696 req_ctx
= skcipher_request_ctx(areq
);
1697 common
= &req_ctx
->common
;
1699 artpec6_crypto_init_dma_operation(common
);
1701 if (variant
== ARTPEC6_CRYPTO
)
1702 ctx
->key_md
= FIELD_PREP(A6_CRY_MD_OPER
, a6_regk_crypto_dlkey
);
1704 ctx
->key_md
= FIELD_PREP(A7_CRY_MD_OPER
, a7_regk_crypto_dlkey
);
1706 ret
= artpec6_crypto_setup_out_descr(common
, (void *)&ctx
->key_md
,
1707 sizeof(ctx
->key_md
), false, false);
1711 ret
= artpec6_crypto_setup_out_descr(common
, ctx
->aes_key
,
1712 ctx
->key_length
, true, false);
1716 req_ctx
->cipher_md
= 0;
1718 if (ctx
->crypto_type
== ARTPEC6_CRYPTO_CIPHER_AES_XTS
)
1719 cipher_klen
= ctx
->key_length
/2;
1721 cipher_klen
= ctx
->key_length
;
1724 switch (cipher_klen
) {
1726 cipher_len
= regk_crypto_key_128
;
1729 cipher_len
= regk_crypto_key_192
;
1732 cipher_len
= regk_crypto_key_256
;
1735 pr_err("%s: Invalid key length %d!\n",
1736 MODULE_NAME
, ctx
->key_length
);
1740 switch (ctx
->crypto_type
) {
1741 case ARTPEC6_CRYPTO_CIPHER_AES_ECB
:
1742 oper
= regk_crypto_aes_ecb
;
1743 cipher_decr
= req_ctx
->decrypt
;
1746 case ARTPEC6_CRYPTO_CIPHER_AES_CBC
:
1747 oper
= regk_crypto_aes_cbc
;
1748 cipher_decr
= req_ctx
->decrypt
;
1751 case ARTPEC6_CRYPTO_CIPHER_AES_CTR
:
1752 oper
= regk_crypto_aes_ctr
;
1753 cipher_decr
= false;
1756 case ARTPEC6_CRYPTO_CIPHER_AES_XTS
:
1757 oper
= regk_crypto_aes_xts
;
1758 cipher_decr
= req_ctx
->decrypt
;
1760 if (variant
== ARTPEC6_CRYPTO
)
1761 req_ctx
->cipher_md
|= A6_CRY_MD_CIPHER_DSEQ
;
1763 req_ctx
->cipher_md
|= A7_CRY_MD_CIPHER_DSEQ
;
1767 pr_err("%s: Invalid cipher mode %d!\n",
1768 MODULE_NAME
, ctx
->crypto_type
);
1772 if (variant
== ARTPEC6_CRYPTO
) {
1773 req_ctx
->cipher_md
|= FIELD_PREP(A6_CRY_MD_OPER
, oper
);
1774 req_ctx
->cipher_md
|= FIELD_PREP(A6_CRY_MD_CIPHER_LEN
,
1777 req_ctx
->cipher_md
|= A6_CRY_MD_CIPHER_DECR
;
1779 req_ctx
->cipher_md
|= FIELD_PREP(A7_CRY_MD_OPER
, oper
);
1780 req_ctx
->cipher_md
|= FIELD_PREP(A7_CRY_MD_CIPHER_LEN
,
1783 req_ctx
->cipher_md
|= A7_CRY_MD_CIPHER_DECR
;
1786 ret
= artpec6_crypto_setup_out_descr(common
,
1787 &req_ctx
->cipher_md
,
1788 sizeof(req_ctx
->cipher_md
),
1793 ret
= artpec6_crypto_setup_in_descr(common
, ac
->pad_buffer
, 4, false);
1798 ret
= artpec6_crypto_setup_out_descr(common
, areq
->iv
, iv_len
,
1804 artpec6_crypto_walk_init(&walk
, areq
->src
);
1805 ret
= artpec6_crypto_setup_sg_descrs_out(common
, &walk
, areq
->cryptlen
);
1810 artpec6_crypto_walk_init(&walk
, areq
->dst
);
1811 ret
= artpec6_crypto_setup_sg_descrs_in(common
, &walk
, areq
->cryptlen
);
1815 /* CTR-mode padding required by the HW. */
1816 if (ctx
->crypto_type
== ARTPEC6_CRYPTO_CIPHER_AES_CTR
||
1817 ctx
->crypto_type
== ARTPEC6_CRYPTO_CIPHER_AES_XTS
) {
1818 size_t pad
= ALIGN(areq
->cryptlen
, AES_BLOCK_SIZE
) -
1822 ret
= artpec6_crypto_setup_out_descr(common
,
1828 ret
= artpec6_crypto_setup_in_descr(common
,
1829 ac
->pad_buffer
, pad
,
1836 ret
= artpec6_crypto_terminate_out_descrs(common
);
1840 ret
= artpec6_crypto_terminate_in_descrs(common
);
1844 return artpec6_crypto_dma_map_descs(common
);
1847 static int artpec6_crypto_prepare_aead(struct aead_request
*areq
)
1851 size_t input_length
;
1852 struct artpec6_cryptotfm_context
*ctx
= crypto_tfm_ctx(areq
->base
.tfm
);
1853 struct artpec6_crypto_aead_req_ctx
*req_ctx
= aead_request_ctx(areq
);
1854 struct crypto_aead
*cipher
= crypto_aead_reqtfm(areq
);
1855 struct artpec6_crypto_req_common
*common
= &req_ctx
->common
;
1856 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
1857 enum artpec6_crypto_variant variant
= ac
->variant
;
1860 artpec6_crypto_init_dma_operation(common
);
1863 if (variant
== ARTPEC6_CRYPTO
) {
1864 ctx
->key_md
= FIELD_PREP(A6_CRY_MD_OPER
,
1865 a6_regk_crypto_dlkey
);
1867 ctx
->key_md
= FIELD_PREP(A7_CRY_MD_OPER
,
1868 a7_regk_crypto_dlkey
);
1870 ret
= artpec6_crypto_setup_out_descr(common
, (void *)&ctx
->key_md
,
1871 sizeof(ctx
->key_md
), false, false);
1875 ret
= artpec6_crypto_setup_out_descr(common
, ctx
->aes_key
,
1876 ctx
->key_length
, true, false);
1880 req_ctx
->cipher_md
= 0;
1882 switch (ctx
->key_length
) {
1884 md_cipher_len
= regk_crypto_key_128
;
1887 md_cipher_len
= regk_crypto_key_192
;
1890 md_cipher_len
= regk_crypto_key_256
;
1896 if (variant
== ARTPEC6_CRYPTO
) {
1897 req_ctx
->cipher_md
|= FIELD_PREP(A6_CRY_MD_OPER
,
1898 regk_crypto_aes_gcm
);
1899 req_ctx
->cipher_md
|= FIELD_PREP(A6_CRY_MD_CIPHER_LEN
,
1901 if (req_ctx
->decrypt
)
1902 req_ctx
->cipher_md
|= A6_CRY_MD_CIPHER_DECR
;
1904 req_ctx
->cipher_md
|= FIELD_PREP(A7_CRY_MD_OPER
,
1905 regk_crypto_aes_gcm
);
1906 req_ctx
->cipher_md
|= FIELD_PREP(A7_CRY_MD_CIPHER_LEN
,
1908 if (req_ctx
->decrypt
)
1909 req_ctx
->cipher_md
|= A7_CRY_MD_CIPHER_DECR
;
1912 ret
= artpec6_crypto_setup_out_descr(common
,
1913 (void *) &req_ctx
->cipher_md
,
1914 sizeof(req_ctx
->cipher_md
), false,
1919 ret
= artpec6_crypto_setup_in_descr(common
, ac
->pad_buffer
, 4, false);
1923 /* For the decryption, cryptlen includes the tag. */
1924 input_length
= areq
->cryptlen
;
1925 if (req_ctx
->decrypt
)
1926 input_length
-= AES_BLOCK_SIZE
;
1928 /* Prepare the context buffer */
1929 req_ctx
->hw_ctx
.aad_length_bits
=
1930 __cpu_to_be64(8*areq
->assoclen
);
1932 req_ctx
->hw_ctx
.text_length_bits
=
1933 __cpu_to_be64(8*input_length
);
1935 memcpy(req_ctx
->hw_ctx
.J0
, areq
->iv
, crypto_aead_ivsize(cipher
));
1936 // The HW omits the initial increment of the counter field.
1937 crypto_inc(req_ctx
->hw_ctx
.J0
+12, 4);
1939 ret
= artpec6_crypto_setup_out_descr(common
, &req_ctx
->hw_ctx
,
1940 sizeof(struct artpec6_crypto_aead_hw_ctx
), false, false);
1945 struct artpec6_crypto_walk walk
;
1947 artpec6_crypto_walk_init(&walk
, areq
->src
);
1949 /* Associated data */
1950 count
= areq
->assoclen
;
1951 ret
= artpec6_crypto_setup_sg_descrs_out(common
, &walk
, count
);
1955 if (!IS_ALIGNED(areq
->assoclen
, 16)) {
1956 size_t assoc_pad
= 16 - (areq
->assoclen
% 16);
1957 /* The HW mandates zero padding here */
1958 ret
= artpec6_crypto_setup_out_descr(common
,
1966 /* Data to crypto */
1967 count
= input_length
;
1968 ret
= artpec6_crypto_setup_sg_descrs_out(common
, &walk
, count
);
1972 if (!IS_ALIGNED(input_length
, 16)) {
1973 size_t crypto_pad
= 16 - (input_length
% 16);
1974 /* The HW mandates zero padding here */
1975 ret
= artpec6_crypto_setup_out_descr(common
,
1985 /* Data from crypto */
1987 struct artpec6_crypto_walk walk
;
1988 size_t output_len
= areq
->cryptlen
;
1990 if (req_ctx
->decrypt
)
1991 output_len
-= AES_BLOCK_SIZE
;
1993 artpec6_crypto_walk_init(&walk
, areq
->dst
);
1995 /* skip associated data in the output */
1996 count
= artpec6_crypto_walk_advance(&walk
, areq
->assoclen
);
2001 ret
= artpec6_crypto_setup_sg_descrs_in(common
, &walk
, count
);
2005 /* Put padding between the cryptotext and the auth tag */
2006 if (!IS_ALIGNED(output_len
, 16)) {
2007 size_t crypto_pad
= 16 - (output_len
% 16);
2009 ret
= artpec6_crypto_setup_in_descr(common
,
2016 /* The authentication tag shall follow immediately after
2017 * the output ciphertext. For decryption it is put in a context
2018 * buffer for later compare against the input tag.
2020 count
= AES_BLOCK_SIZE
;
2022 if (req_ctx
->decrypt
) {
2023 ret
= artpec6_crypto_setup_in_descr(common
,
2024 req_ctx
->decryption_tag
, count
, false);
2029 ret
= artpec6_crypto_setup_sg_descrs_in(common
, &walk
,
2037 ret
= artpec6_crypto_terminate_in_descrs(common
);
2041 ret
= artpec6_crypto_terminate_out_descrs(common
);
2045 return artpec6_crypto_dma_map_descs(common
);
2048 static void artpec6_crypto_process_queue(struct artpec6_crypto
*ac
)
2050 struct artpec6_crypto_req_common
*req
;
2052 while (!list_empty(&ac
->queue
) && !artpec6_crypto_busy()) {
2053 req
= list_first_entry(&ac
->queue
,
2054 struct artpec6_crypto_req_common
,
2056 list_move_tail(&req
->list
, &ac
->pending
);
2057 artpec6_crypto_start_dma(req
);
2059 req
->req
->complete(req
->req
, -EINPROGRESS
);
2063 * In some cases, the hardware can raise an in_eop_flush interrupt
2064 * before actually updating the status, so we have an timer which will
2065 * recheck the status on timeout. Since the cases are expected to be
2066 * very rare, we use a relatively large timeout value. There should be
2067 * no noticeable negative effect if we timeout spuriously.
2069 if (ac
->pending_count
)
2070 mod_timer(&ac
->timer
, jiffies
+ msecs_to_jiffies(100));
2072 del_timer(&ac
->timer
);
2075 static void artpec6_crypto_timeout(struct timer_list
*t
)
2077 struct artpec6_crypto
*ac
= from_timer(ac
, t
, timer
);
2079 dev_info_ratelimited(artpec6_crypto_dev
, "timeout\n");
2081 tasklet_schedule(&ac
->task
);
2084 static void artpec6_crypto_task(unsigned long data
)
2086 struct artpec6_crypto
*ac
= (struct artpec6_crypto
*)data
;
2087 struct artpec6_crypto_req_common
*req
;
2088 struct artpec6_crypto_req_common
*n
;
2090 if (list_empty(&ac
->pending
)) {
2091 pr_debug("Spurious IRQ\n");
2095 spin_lock_bh(&ac
->queue_lock
);
2097 list_for_each_entry_safe(req
, n
, &ac
->pending
, list
) {
2098 struct artpec6_crypto_dma_descriptors
*dma
= req
->dma
;
2101 dma_sync_single_for_cpu(artpec6_crypto_dev
, dma
->stat_dma_addr
,
2102 sizeof(dma
->stat
[0]),
2105 stat
= req
->dma
->stat
[req
->dma
->in_cnt
-1];
2107 /* A non-zero final status descriptor indicates
2108 * this job has finished.
2110 pr_debug("Request %p status is %X\n", req
, stat
);
2114 /* Allow testing of timeout handling with fault injection */
2115 #ifdef CONFIG_FAULT_INJECTION
2116 if (should_fail(&artpec6_crypto_fail_status_read
, 1))
2120 pr_debug("Completing request %p\n", req
);
2122 list_del(&req
->list
);
2124 artpec6_crypto_dma_unmap_all(req
);
2125 artpec6_crypto_copy_bounce_buffers(req
);
2127 ac
->pending_count
--;
2128 artpec6_crypto_common_destroy(req
);
2129 req
->complete(req
->req
);
2132 artpec6_crypto_process_queue(ac
);
2134 spin_unlock_bh(&ac
->queue_lock
);
2137 static void artpec6_crypto_complete_crypto(struct crypto_async_request
*req
)
2139 req
->complete(req
, 0);
2143 artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request
*req
)
2145 struct skcipher_request
*cipher_req
= container_of(req
,
2146 struct skcipher_request
, base
);
2148 scatterwalk_map_and_copy(cipher_req
->iv
, cipher_req
->src
,
2149 cipher_req
->cryptlen
- AES_BLOCK_SIZE
,
2151 req
->complete(req
, 0);
2155 artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request
*req
)
2157 struct skcipher_request
*cipher_req
= container_of(req
,
2158 struct skcipher_request
, base
);
2160 scatterwalk_map_and_copy(cipher_req
->iv
, cipher_req
->dst
,
2161 cipher_req
->cryptlen
- AES_BLOCK_SIZE
,
2163 req
->complete(req
, 0);
2166 static void artpec6_crypto_complete_aead(struct crypto_async_request
*req
)
2170 /* Verify GCM hashtag. */
2171 struct aead_request
*areq
= container_of(req
,
2172 struct aead_request
, base
);
2173 struct artpec6_crypto_aead_req_ctx
*req_ctx
= aead_request_ctx(areq
);
2175 if (req_ctx
->decrypt
) {
2176 u8 input_tag
[AES_BLOCK_SIZE
];
2178 sg_pcopy_to_buffer(areq
->src
,
2179 sg_nents(areq
->src
),
2182 areq
->assoclen
+ areq
->cryptlen
-
2185 if (memcmp(req_ctx
->decryption_tag
,
2188 pr_debug("***EBADMSG:\n");
2189 print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS
, 32, 1,
2190 input_tag
, AES_BLOCK_SIZE
, true);
2191 print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS
, 32, 1,
2192 req_ctx
->decryption_tag
,
2193 AES_BLOCK_SIZE
, true);
2199 req
->complete(req
, result
);
2202 static void artpec6_crypto_complete_hash(struct crypto_async_request
*req
)
2204 req
->complete(req
, 0);
2208 /*------------------- Hash functions -----------------------------------------*/
2210 artpec6_crypto_hash_set_key(struct crypto_ahash
*tfm
,
2211 const u8
*key
, unsigned int keylen
)
2213 struct artpec6_hashalg_context
*tfm_ctx
= crypto_tfm_ctx(&tfm
->base
);
2218 pr_err("Invalid length (%d) of HMAC key\n",
2223 memset(tfm_ctx
->hmac_key
, 0, sizeof(tfm_ctx
->hmac_key
));
2225 blocksize
= crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
2227 if (keylen
> blocksize
) {
2228 SHASH_DESC_ON_STACK(hdesc
, tfm_ctx
->child_hash
);
2230 hdesc
->tfm
= tfm_ctx
->child_hash
;
2231 hdesc
->flags
= crypto_ahash_get_flags(tfm
) &
2232 CRYPTO_TFM_REQ_MAY_SLEEP
;
2234 tfm_ctx
->hmac_key_length
= blocksize
;
2235 ret
= crypto_shash_digest(hdesc
, key
, keylen
,
2241 memcpy(tfm_ctx
->hmac_key
, key
, keylen
);
2242 tfm_ctx
->hmac_key_length
= keylen
;
2249 artpec6_crypto_init_hash(struct ahash_request
*req
, u8 type
, int hmac
)
2251 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
2252 enum artpec6_crypto_variant variant
= ac
->variant
;
2253 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2256 memset(req_ctx
, 0, sizeof(*req_ctx
));
2258 req_ctx
->hash_flags
= HASH_FLAG_INIT_CTX
;
2260 req_ctx
->hash_flags
|= (HASH_FLAG_HMAC
| HASH_FLAG_UPDATE_KEY
);
2263 case ARTPEC6_CRYPTO_HASH_SHA1
:
2264 oper
= hmac
? regk_crypto_hmac_sha1
: regk_crypto_sha1
;
2266 case ARTPEC6_CRYPTO_HASH_SHA256
:
2267 oper
= hmac
? regk_crypto_hmac_sha256
: regk_crypto_sha256
;
2269 case ARTPEC6_CRYPTO_HASH_SHA384
:
2270 oper
= hmac
? regk_crypto_hmac_sha384
: regk_crypto_sha384
;
2272 case ARTPEC6_CRYPTO_HASH_SHA512
:
2273 oper
= hmac
? regk_crypto_hmac_sha512
: regk_crypto_sha512
;
2277 pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME
, type
);
2281 if (variant
== ARTPEC6_CRYPTO
)
2282 req_ctx
->hash_md
= FIELD_PREP(A6_CRY_MD_OPER
, oper
);
2284 req_ctx
->hash_md
= FIELD_PREP(A7_CRY_MD_OPER
, oper
);
2289 static int artpec6_crypto_prepare_submit_hash(struct ahash_request
*req
)
2291 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2294 if (!req_ctx
->common
.dma
) {
2295 ret
= artpec6_crypto_common_init(&req_ctx
->common
,
2297 artpec6_crypto_complete_hash
,
2304 ret
= artpec6_crypto_prepare_hash(req
);
2306 case ARTPEC6_CRYPTO_PREPARE_HASH_START
:
2307 ret
= artpec6_crypto_submit(&req_ctx
->common
);
2310 case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START
:
2315 artpec6_crypto_common_destroy(&req_ctx
->common
);
2322 static int artpec6_crypto_hash_final(struct ahash_request
*req
)
2324 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2326 req_ctx
->hash_flags
|= HASH_FLAG_FINALIZE
;
2328 return artpec6_crypto_prepare_submit_hash(req
);
2331 static int artpec6_crypto_hash_update(struct ahash_request
*req
)
2333 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2335 req_ctx
->hash_flags
|= HASH_FLAG_UPDATE
;
2337 return artpec6_crypto_prepare_submit_hash(req
);
2340 static int artpec6_crypto_sha1_init(struct ahash_request
*req
)
2342 return artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA1
, 0);
2345 static int artpec6_crypto_sha1_digest(struct ahash_request
*req
)
2347 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2349 artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA1
, 0);
2351 req_ctx
->hash_flags
|= HASH_FLAG_UPDATE
| HASH_FLAG_FINALIZE
;
2353 return artpec6_crypto_prepare_submit_hash(req
);
2356 static int artpec6_crypto_sha256_init(struct ahash_request
*req
)
2358 return artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA256
, 0);
2361 static int artpec6_crypto_sha256_digest(struct ahash_request
*req
)
2363 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2365 artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA256
, 0);
2366 req_ctx
->hash_flags
|= HASH_FLAG_UPDATE
| HASH_FLAG_FINALIZE
;
2368 return artpec6_crypto_prepare_submit_hash(req
);
2371 static int __maybe_unused
artpec6_crypto_sha384_init(struct ahash_request
*req
)
2373 return artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA384
, 0);
2376 static int __maybe_unused
2377 artpec6_crypto_sha384_digest(struct ahash_request
*req
)
2379 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2381 artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA384
, 0);
2382 req_ctx
->hash_flags
|= HASH_FLAG_UPDATE
| HASH_FLAG_FINALIZE
;
2384 return artpec6_crypto_prepare_submit_hash(req
);
2387 static int artpec6_crypto_sha512_init(struct ahash_request
*req
)
2389 return artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA512
, 0);
2392 static int artpec6_crypto_sha512_digest(struct ahash_request
*req
)
2394 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2396 artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA512
, 0);
2397 req_ctx
->hash_flags
|= HASH_FLAG_UPDATE
| HASH_FLAG_FINALIZE
;
2399 return artpec6_crypto_prepare_submit_hash(req
);
2402 static int artpec6_crypto_hmac_sha256_init(struct ahash_request
*req
)
2404 return artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA256
, 1);
2407 static int __maybe_unused
2408 artpec6_crypto_hmac_sha384_init(struct ahash_request
*req
)
2410 return artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA384
, 1);
2413 static int artpec6_crypto_hmac_sha512_init(struct ahash_request
*req
)
2415 return artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA512
, 1);
2418 static int artpec6_crypto_hmac_sha256_digest(struct ahash_request
*req
)
2420 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2422 artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA256
, 1);
2423 req_ctx
->hash_flags
|= HASH_FLAG_UPDATE
| HASH_FLAG_FINALIZE
;
2425 return artpec6_crypto_prepare_submit_hash(req
);
2428 static int __maybe_unused
2429 artpec6_crypto_hmac_sha384_digest(struct ahash_request
*req
)
2431 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2433 artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA384
, 1);
2434 req_ctx
->hash_flags
|= HASH_FLAG_UPDATE
| HASH_FLAG_FINALIZE
;
2436 return artpec6_crypto_prepare_submit_hash(req
);
2439 static int artpec6_crypto_hmac_sha512_digest(struct ahash_request
*req
)
2441 struct artpec6_hash_request_context
*req_ctx
= ahash_request_ctx(req
);
2443 artpec6_crypto_init_hash(req
, ARTPEC6_CRYPTO_HASH_SHA512
, 1);
2444 req_ctx
->hash_flags
|= HASH_FLAG_UPDATE
| HASH_FLAG_FINALIZE
;
2446 return artpec6_crypto_prepare_submit_hash(req
);
2449 static int artpec6_crypto_ahash_init_common(struct crypto_tfm
*tfm
,
2450 const char *base_hash_name
)
2452 struct artpec6_hashalg_context
*tfm_ctx
= crypto_tfm_ctx(tfm
);
2454 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
2455 sizeof(struct artpec6_hash_request_context
));
2456 memset(tfm_ctx
, 0, sizeof(*tfm_ctx
));
2458 if (base_hash_name
) {
2459 struct crypto_shash
*child
;
2461 child
= crypto_alloc_shash(base_hash_name
, 0,
2462 CRYPTO_ALG_NEED_FALLBACK
);
2465 return PTR_ERR(child
);
2467 tfm_ctx
->child_hash
= child
;
2473 static int artpec6_crypto_ahash_init(struct crypto_tfm
*tfm
)
2475 return artpec6_crypto_ahash_init_common(tfm
, NULL
);
2478 static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm
*tfm
)
2480 return artpec6_crypto_ahash_init_common(tfm
, "sha256");
2483 static int __maybe_unused
2484 artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm
*tfm
)
2486 return artpec6_crypto_ahash_init_common(tfm
, "sha384");
2489 static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm
*tfm
)
2491 return artpec6_crypto_ahash_init_common(tfm
, "sha512");
2494 static void artpec6_crypto_ahash_exit(struct crypto_tfm
*tfm
)
2496 struct artpec6_hashalg_context
*tfm_ctx
= crypto_tfm_ctx(tfm
);
2498 if (tfm_ctx
->child_hash
)
2499 crypto_free_shash(tfm_ctx
->child_hash
);
2501 memset(tfm_ctx
->hmac_key
, 0, sizeof(tfm_ctx
->hmac_key
));
2502 tfm_ctx
->hmac_key_length
= 0;
2505 static int artpec6_crypto_hash_export(struct ahash_request
*req
, void *out
)
2507 const struct artpec6_hash_request_context
*ctx
= ahash_request_ctx(req
);
2508 struct artpec6_hash_export_state
*state
= out
;
2509 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
2510 enum artpec6_crypto_variant variant
= ac
->variant
;
2512 BUILD_BUG_ON(sizeof(state
->partial_buffer
) !=
2513 sizeof(ctx
->partial_buffer
));
2514 BUILD_BUG_ON(sizeof(state
->digeststate
) != sizeof(ctx
->digeststate
));
2516 state
->digcnt
= ctx
->digcnt
;
2517 state
->partial_bytes
= ctx
->partial_bytes
;
2518 state
->hash_flags
= ctx
->hash_flags
;
2520 if (variant
== ARTPEC6_CRYPTO
)
2521 state
->oper
= FIELD_GET(A6_CRY_MD_OPER
, ctx
->hash_md
);
2523 state
->oper
= FIELD_GET(A7_CRY_MD_OPER
, ctx
->hash_md
);
2525 memcpy(state
->partial_buffer
, ctx
->partial_buffer
,
2526 sizeof(state
->partial_buffer
));
2527 memcpy(state
->digeststate
, ctx
->digeststate
,
2528 sizeof(state
->digeststate
));
2533 static int artpec6_crypto_hash_import(struct ahash_request
*req
, const void *in
)
2535 struct artpec6_hash_request_context
*ctx
= ahash_request_ctx(req
);
2536 const struct artpec6_hash_export_state
*state
= in
;
2537 struct artpec6_crypto
*ac
= dev_get_drvdata(artpec6_crypto_dev
);
2538 enum artpec6_crypto_variant variant
= ac
->variant
;
2540 memset(ctx
, 0, sizeof(*ctx
));
2542 ctx
->digcnt
= state
->digcnt
;
2543 ctx
->partial_bytes
= state
->partial_bytes
;
2544 ctx
->hash_flags
= state
->hash_flags
;
2546 if (variant
== ARTPEC6_CRYPTO
)
2547 ctx
->hash_md
= FIELD_PREP(A6_CRY_MD_OPER
, state
->oper
);
2549 ctx
->hash_md
= FIELD_PREP(A7_CRY_MD_OPER
, state
->oper
);
2551 memcpy(ctx
->partial_buffer
, state
->partial_buffer
,
2552 sizeof(state
->partial_buffer
));
2553 memcpy(ctx
->digeststate
, state
->digeststate
,
2554 sizeof(state
->digeststate
));
2559 static int init_crypto_hw(struct artpec6_crypto
*ac
)
2561 enum artpec6_crypto_variant variant
= ac
->variant
;
2562 void __iomem
*base
= ac
->base
;
2563 u32 out_descr_buf_size
;
2564 u32 out_data_buf_size
;
2565 u32 in_data_buf_size
;
2566 u32 in_descr_buf_size
;
2567 u32 in_stat_buf_size
;
2571 * The PDMA unit contains 1984 bytes of internal memory for the OUT
2572 * channels and 1024 bytes for the IN channel. This is an elastic
2573 * memory used to internally store the descriptors and data. The values
2574 * are specified in 64 byte increments. Trustzone buffers are not
2575 * used at this stage.
2577 out_data_buf_size
= 16; /* 1024 bytes for data */
2578 out_descr_buf_size
= 15; /* 960 bytes for descriptors */
2579 in_data_buf_size
= 8; /* 512 bytes for data */
2580 in_descr_buf_size
= 4; /* 256 bytes for descriptors */
2581 in_stat_buf_size
= 4; /* 256 bytes for stat descrs */
2583 BUILD_BUG_ON_MSG((out_data_buf_size
2584 + out_descr_buf_size
) * 64 > 1984,
2585 "Invalid OUT configuration");
2587 BUILD_BUG_ON_MSG((in_data_buf_size
2589 + in_stat_buf_size
) * 64 > 1024,
2590 "Invalid IN configuration");
2592 in
= FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE
, in_data_buf_size
) |
2593 FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE
, in_descr_buf_size
) |
2594 FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE
, in_stat_buf_size
);
2596 out
= FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE
, out_data_buf_size
) |
2597 FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE
, out_descr_buf_size
);
2599 writel_relaxed(out
, base
+ PDMA_OUT_BUF_CFG
);
2600 writel_relaxed(PDMA_OUT_CFG_EN
, base
+ PDMA_OUT_CFG
);
2602 if (variant
== ARTPEC6_CRYPTO
) {
2603 writel_relaxed(in
, base
+ A6_PDMA_IN_BUF_CFG
);
2604 writel_relaxed(PDMA_IN_CFG_EN
, base
+ A6_PDMA_IN_CFG
);
2605 writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA
|
2606 A6_PDMA_INTR_MASK_IN_EOP_FLUSH
,
2607 base
+ A6_PDMA_INTR_MASK
);
2609 writel_relaxed(in
, base
+ A7_PDMA_IN_BUF_CFG
);
2610 writel_relaxed(PDMA_IN_CFG_EN
, base
+ A7_PDMA_IN_CFG
);
2611 writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA
|
2612 A7_PDMA_INTR_MASK_IN_EOP_FLUSH
,
2613 base
+ A7_PDMA_INTR_MASK
);
2619 static void artpec6_crypto_disable_hw(struct artpec6_crypto
*ac
)
2621 enum artpec6_crypto_variant variant
= ac
->variant
;
2622 void __iomem
*base
= ac
->base
;
2624 if (variant
== ARTPEC6_CRYPTO
) {
2625 writel_relaxed(A6_PDMA_IN_CMD_STOP
, base
+ A6_PDMA_IN_CMD
);
2626 writel_relaxed(0, base
+ A6_PDMA_IN_CFG
);
2627 writel_relaxed(A6_PDMA_OUT_CMD_STOP
, base
+ PDMA_OUT_CMD
);
2629 writel_relaxed(A7_PDMA_IN_CMD_STOP
, base
+ A7_PDMA_IN_CMD
);
2630 writel_relaxed(0, base
+ A7_PDMA_IN_CFG
);
2631 writel_relaxed(A7_PDMA_OUT_CMD_STOP
, base
+ PDMA_OUT_CMD
);
2634 writel_relaxed(0, base
+ PDMA_OUT_CFG
);
2638 static irqreturn_t
artpec6_crypto_irq(int irq
, void *dev_id
)
2640 struct artpec6_crypto
*ac
= dev_id
;
2641 enum artpec6_crypto_variant variant
= ac
->variant
;
2642 void __iomem
*base
= ac
->base
;
2643 u32 mask_in_data
, mask_in_eop_flush
;
2644 u32 in_cmd_flush_stat
, in_cmd_reg
;
2649 if (variant
== ARTPEC6_CRYPTO
) {
2650 intr
= readl_relaxed(base
+ A6_PDMA_MASKED_INTR
);
2651 mask_in_data
= A6_PDMA_INTR_MASK_IN_DATA
;
2652 mask_in_eop_flush
= A6_PDMA_INTR_MASK_IN_EOP_FLUSH
;
2653 in_cmd_flush_stat
= A6_PDMA_IN_CMD_FLUSH_STAT
;
2654 in_cmd_reg
= A6_PDMA_IN_CMD
;
2655 ack_intr_reg
= A6_PDMA_ACK_INTR
;
2657 intr
= readl_relaxed(base
+ A7_PDMA_MASKED_INTR
);
2658 mask_in_data
= A7_PDMA_INTR_MASK_IN_DATA
;
2659 mask_in_eop_flush
= A7_PDMA_INTR_MASK_IN_EOP_FLUSH
;
2660 in_cmd_flush_stat
= A7_PDMA_IN_CMD_FLUSH_STAT
;
2661 in_cmd_reg
= A7_PDMA_IN_CMD
;
2662 ack_intr_reg
= A7_PDMA_ACK_INTR
;
2665 /* We get two interrupt notifications from each job.
2666 * The in_data means all data was sent to memory and then
2667 * we request a status flush command to write the per-job
2668 * status to its status vector. This ensures that the
2669 * tasklet can detect exactly how many submitted jobs
2670 * that have finished.
2672 if (intr
& mask_in_data
)
2673 ack
|= mask_in_data
;
2675 if (intr
& mask_in_eop_flush
)
2676 ack
|= mask_in_eop_flush
;
2678 writel_relaxed(in_cmd_flush_stat
, base
+ in_cmd_reg
);
2680 writel_relaxed(ack
, base
+ ack_intr_reg
);
2682 if (intr
& mask_in_eop_flush
)
2683 tasklet_schedule(&ac
->task
);
2688 /*------------------- Algorithm definitions ----------------------------------*/
2691 static struct ahash_alg hash_algos
[] = {
2694 .init
= artpec6_crypto_sha1_init
,
2695 .update
= artpec6_crypto_hash_update
,
2696 .final
= artpec6_crypto_hash_final
,
2697 .digest
= artpec6_crypto_sha1_digest
,
2698 .import
= artpec6_crypto_hash_import
,
2699 .export
= artpec6_crypto_hash_export
,
2700 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
2701 .halg
.statesize
= sizeof(struct artpec6_hash_export_state
),
2704 .cra_driver_name
= "artpec-sha1",
2705 .cra_priority
= 300,
2706 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
| CRYPTO_ALG_ASYNC
,
2707 .cra_blocksize
= SHA1_BLOCK_SIZE
,
2708 .cra_ctxsize
= sizeof(struct artpec6_hashalg_context
),
2710 .cra_module
= THIS_MODULE
,
2711 .cra_init
= artpec6_crypto_ahash_init
,
2712 .cra_exit
= artpec6_crypto_ahash_exit
,
2717 .init
= artpec6_crypto_sha256_init
,
2718 .update
= artpec6_crypto_hash_update
,
2719 .final
= artpec6_crypto_hash_final
,
2720 .digest
= artpec6_crypto_sha256_digest
,
2721 .import
= artpec6_crypto_hash_import
,
2722 .export
= artpec6_crypto_hash_export
,
2723 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
2724 .halg
.statesize
= sizeof(struct artpec6_hash_export_state
),
2726 .cra_name
= "sha256",
2727 .cra_driver_name
= "artpec-sha256",
2728 .cra_priority
= 300,
2729 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
| CRYPTO_ALG_ASYNC
,
2730 .cra_blocksize
= SHA256_BLOCK_SIZE
,
2731 .cra_ctxsize
= sizeof(struct artpec6_hashalg_context
),
2733 .cra_module
= THIS_MODULE
,
2734 .cra_init
= artpec6_crypto_ahash_init
,
2735 .cra_exit
= artpec6_crypto_ahash_exit
,
2740 .init
= artpec6_crypto_hmac_sha256_init
,
2741 .update
= artpec6_crypto_hash_update
,
2742 .final
= artpec6_crypto_hash_final
,
2743 .digest
= artpec6_crypto_hmac_sha256_digest
,
2744 .import
= artpec6_crypto_hash_import
,
2745 .export
= artpec6_crypto_hash_export
,
2746 .setkey
= artpec6_crypto_hash_set_key
,
2747 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
2748 .halg
.statesize
= sizeof(struct artpec6_hash_export_state
),
2750 .cra_name
= "hmac(sha256)",
2751 .cra_driver_name
= "artpec-hmac-sha256",
2752 .cra_priority
= 300,
2753 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
| CRYPTO_ALG_ASYNC
,
2754 .cra_blocksize
= SHA256_BLOCK_SIZE
,
2755 .cra_ctxsize
= sizeof(struct artpec6_hashalg_context
),
2757 .cra_module
= THIS_MODULE
,
2758 .cra_init
= artpec6_crypto_ahash_init_hmac_sha256
,
2759 .cra_exit
= artpec6_crypto_ahash_exit
,
2764 static struct ahash_alg artpec7_hash_algos
[] = {
2767 .init
= artpec6_crypto_sha384_init
,
2768 .update
= artpec6_crypto_hash_update
,
2769 .final
= artpec6_crypto_hash_final
,
2770 .digest
= artpec6_crypto_sha384_digest
,
2771 .import
= artpec6_crypto_hash_import
,
2772 .export
= artpec6_crypto_hash_export
,
2773 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
2774 .halg
.statesize
= sizeof(struct artpec6_hash_export_state
),
2776 .cra_name
= "sha384",
2777 .cra_driver_name
= "artpec-sha384",
2778 .cra_priority
= 300,
2779 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
| CRYPTO_ALG_ASYNC
,
2780 .cra_blocksize
= SHA384_BLOCK_SIZE
,
2781 .cra_ctxsize
= sizeof(struct artpec6_hashalg_context
),
2783 .cra_module
= THIS_MODULE
,
2784 .cra_init
= artpec6_crypto_ahash_init
,
2785 .cra_exit
= artpec6_crypto_ahash_exit
,
2790 .init
= artpec6_crypto_hmac_sha384_init
,
2791 .update
= artpec6_crypto_hash_update
,
2792 .final
= artpec6_crypto_hash_final
,
2793 .digest
= artpec6_crypto_hmac_sha384_digest
,
2794 .import
= artpec6_crypto_hash_import
,
2795 .export
= artpec6_crypto_hash_export
,
2796 .setkey
= artpec6_crypto_hash_set_key
,
2797 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
2798 .halg
.statesize
= sizeof(struct artpec6_hash_export_state
),
2800 .cra_name
= "hmac(sha384)",
2801 .cra_driver_name
= "artpec-hmac-sha384",
2802 .cra_priority
= 300,
2803 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
| CRYPTO_ALG_ASYNC
,
2804 .cra_blocksize
= SHA384_BLOCK_SIZE
,
2805 .cra_ctxsize
= sizeof(struct artpec6_hashalg_context
),
2807 .cra_module
= THIS_MODULE
,
2808 .cra_init
= artpec6_crypto_ahash_init_hmac_sha384
,
2809 .cra_exit
= artpec6_crypto_ahash_exit
,
2814 .init
= artpec6_crypto_sha512_init
,
2815 .update
= artpec6_crypto_hash_update
,
2816 .final
= artpec6_crypto_hash_final
,
2817 .digest
= artpec6_crypto_sha512_digest
,
2818 .import
= artpec6_crypto_hash_import
,
2819 .export
= artpec6_crypto_hash_export
,
2820 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
2821 .halg
.statesize
= sizeof(struct artpec6_hash_export_state
),
2823 .cra_name
= "sha512",
2824 .cra_driver_name
= "artpec-sha512",
2825 .cra_priority
= 300,
2826 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
| CRYPTO_ALG_ASYNC
,
2827 .cra_blocksize
= SHA512_BLOCK_SIZE
,
2828 .cra_ctxsize
= sizeof(struct artpec6_hashalg_context
),
2830 .cra_module
= THIS_MODULE
,
2831 .cra_init
= artpec6_crypto_ahash_init
,
2832 .cra_exit
= artpec6_crypto_ahash_exit
,
2837 .init
= artpec6_crypto_hmac_sha512_init
,
2838 .update
= artpec6_crypto_hash_update
,
2839 .final
= artpec6_crypto_hash_final
,
2840 .digest
= artpec6_crypto_hmac_sha512_digest
,
2841 .import
= artpec6_crypto_hash_import
,
2842 .export
= artpec6_crypto_hash_export
,
2843 .setkey
= artpec6_crypto_hash_set_key
,
2844 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
2845 .halg
.statesize
= sizeof(struct artpec6_hash_export_state
),
2847 .cra_name
= "hmac(sha512)",
2848 .cra_driver_name
= "artpec-hmac-sha512",
2849 .cra_priority
= 300,
2850 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
| CRYPTO_ALG_ASYNC
,
2851 .cra_blocksize
= SHA512_BLOCK_SIZE
,
2852 .cra_ctxsize
= sizeof(struct artpec6_hashalg_context
),
2854 .cra_module
= THIS_MODULE
,
2855 .cra_init
= artpec6_crypto_ahash_init_hmac_sha512
,
2856 .cra_exit
= artpec6_crypto_ahash_exit
,
2862 static struct skcipher_alg crypto_algos
[] = {
2866 .cra_name
= "ecb(aes)",
2867 .cra_driver_name
= "artpec6-ecb-aes",
2868 .cra_priority
= 300,
2869 .cra_flags
= CRYPTO_ALG_TYPE_SKCIPHER
|
2871 .cra_blocksize
= AES_BLOCK_SIZE
,
2872 .cra_ctxsize
= sizeof(struct artpec6_cryptotfm_context
),
2874 .cra_module
= THIS_MODULE
,
2876 .min_keysize
= AES_MIN_KEY_SIZE
,
2877 .max_keysize
= AES_MAX_KEY_SIZE
,
2878 .setkey
= artpec6_crypto_cipher_set_key
,
2879 .encrypt
= artpec6_crypto_encrypt
,
2880 .decrypt
= artpec6_crypto_decrypt
,
2881 .init
= artpec6_crypto_aes_ecb_init
,
2882 .exit
= artpec6_crypto_aes_exit
,
2887 .cra_name
= "ctr(aes)",
2888 .cra_driver_name
= "artpec6-ctr-aes",
2889 .cra_priority
= 300,
2890 .cra_flags
= CRYPTO_ALG_TYPE_SKCIPHER
|
2892 CRYPTO_ALG_NEED_FALLBACK
,
2894 .cra_ctxsize
= sizeof(struct artpec6_cryptotfm_context
),
2896 .cra_module
= THIS_MODULE
,
2898 .min_keysize
= AES_MIN_KEY_SIZE
,
2899 .max_keysize
= AES_MAX_KEY_SIZE
,
2900 .ivsize
= AES_BLOCK_SIZE
,
2901 .setkey
= artpec6_crypto_cipher_set_key
,
2902 .encrypt
= artpec6_crypto_ctr_encrypt
,
2903 .decrypt
= artpec6_crypto_ctr_decrypt
,
2904 .init
= artpec6_crypto_aes_ctr_init
,
2905 .exit
= artpec6_crypto_aes_ctr_exit
,
2910 .cra_name
= "cbc(aes)",
2911 .cra_driver_name
= "artpec6-cbc-aes",
2912 .cra_priority
= 300,
2913 .cra_flags
= CRYPTO_ALG_TYPE_SKCIPHER
|
2915 .cra_blocksize
= AES_BLOCK_SIZE
,
2916 .cra_ctxsize
= sizeof(struct artpec6_cryptotfm_context
),
2918 .cra_module
= THIS_MODULE
,
2920 .min_keysize
= AES_MIN_KEY_SIZE
,
2921 .max_keysize
= AES_MAX_KEY_SIZE
,
2922 .ivsize
= AES_BLOCK_SIZE
,
2923 .setkey
= artpec6_crypto_cipher_set_key
,
2924 .encrypt
= artpec6_crypto_encrypt
,
2925 .decrypt
= artpec6_crypto_decrypt
,
2926 .init
= artpec6_crypto_aes_cbc_init
,
2927 .exit
= artpec6_crypto_aes_exit
2932 .cra_name
= "xts(aes)",
2933 .cra_driver_name
= "artpec6-xts-aes",
2934 .cra_priority
= 300,
2935 .cra_flags
= CRYPTO_ALG_TYPE_SKCIPHER
|
2938 .cra_ctxsize
= sizeof(struct artpec6_cryptotfm_context
),
2940 .cra_module
= THIS_MODULE
,
2942 .min_keysize
= 2*AES_MIN_KEY_SIZE
,
2943 .max_keysize
= 2*AES_MAX_KEY_SIZE
,
2945 .setkey
= artpec6_crypto_xts_set_key
,
2946 .encrypt
= artpec6_crypto_encrypt
,
2947 .decrypt
= artpec6_crypto_decrypt
,
2948 .init
= artpec6_crypto_aes_xts_init
,
2949 .exit
= artpec6_crypto_aes_exit
,
2953 static struct aead_alg aead_algos
[] = {
2955 .init
= artpec6_crypto_aead_init
,
2956 .setkey
= artpec6_crypto_aead_set_key
,
2957 .encrypt
= artpec6_crypto_aead_encrypt
,
2958 .decrypt
= artpec6_crypto_aead_decrypt
,
2959 .ivsize
= AES_BLOCK_SIZE
,
2960 .maxauthsize
= AES_BLOCK_SIZE
,
2963 .cra_name
= "gcm(aes)",
2964 .cra_driver_name
= "artpec-gcm-aes",
2965 .cra_priority
= 300,
2966 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
|
2967 CRYPTO_ALG_KERN_DRIVER_ONLY
,
2969 .cra_ctxsize
= sizeof(struct artpec6_cryptotfm_context
),
2971 .cra_module
= THIS_MODULE
,
2976 #ifdef CONFIG_DEBUG_FS
2985 static struct dentry
*dbgfs_root
;
2987 static void artpec6_crypto_init_debugfs(void)
2989 dbgfs_root
= debugfs_create_dir("artpec6_crypto", NULL
);
2991 if (!dbgfs_root
|| IS_ERR(dbgfs_root
)) {
2993 pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME
);
2997 #ifdef CONFIG_FAULT_INJECTION
2998 fault_create_debugfs_attr("fail_status_read", dbgfs_root
,
2999 &artpec6_crypto_fail_status_read
);
3001 fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root
,
3002 &artpec6_crypto_fail_dma_array_full
);
3006 static void artpec6_crypto_free_debugfs(void)
3011 debugfs_remove_recursive(dbgfs_root
);
3016 static const struct of_device_id artpec6_crypto_of_match
[] = {
3017 { .compatible
= "axis,artpec6-crypto", .data
= (void *)ARTPEC6_CRYPTO
},
3018 { .compatible
= "axis,artpec7-crypto", .data
= (void *)ARTPEC7_CRYPTO
},
3021 MODULE_DEVICE_TABLE(of
, artpec6_crypto_of_match
);
3023 static int artpec6_crypto_probe(struct platform_device
*pdev
)
3025 const struct of_device_id
*match
;
3026 enum artpec6_crypto_variant variant
;
3027 struct artpec6_crypto
*ac
;
3028 struct device
*dev
= &pdev
->dev
;
3030 struct resource
*res
;
3034 if (artpec6_crypto_dev
)
3037 match
= of_match_node(artpec6_crypto_of_match
, dev
->of_node
);
3041 variant
= (enum artpec6_crypto_variant
)match
->data
;
3043 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
3047 base
= devm_ioremap_resource(&pdev
->dev
, res
);
3049 return PTR_ERR(base
);
3051 irq
= platform_get_irq(pdev
, 0);
3055 ac
= devm_kzalloc(&pdev
->dev
, sizeof(struct artpec6_crypto
),
3060 platform_set_drvdata(pdev
, ac
);
3061 ac
->variant
= variant
;
3063 spin_lock_init(&ac
->queue_lock
);
3064 INIT_LIST_HEAD(&ac
->queue
);
3065 INIT_LIST_HEAD(&ac
->pending
);
3066 timer_setup(&ac
->timer
, artpec6_crypto_timeout
, 0);
3070 ac
->dma_cache
= kmem_cache_create("artpec6_crypto_dma",
3071 sizeof(struct artpec6_crypto_dma_descriptors
),
3078 #ifdef CONFIG_DEBUG_FS
3079 artpec6_crypto_init_debugfs();
3082 tasklet_init(&ac
->task
, artpec6_crypto_task
,
3085 ac
->pad_buffer
= devm_kzalloc(&pdev
->dev
, 2 * ARTPEC_CACHE_LINE_MAX
,
3087 if (!ac
->pad_buffer
)
3089 ac
->pad_buffer
= PTR_ALIGN(ac
->pad_buffer
, ARTPEC_CACHE_LINE_MAX
);
3091 ac
->zero_buffer
= devm_kzalloc(&pdev
->dev
, 2 * ARTPEC_CACHE_LINE_MAX
,
3093 if (!ac
->zero_buffer
)
3095 ac
->zero_buffer
= PTR_ALIGN(ac
->zero_buffer
, ARTPEC_CACHE_LINE_MAX
);
3097 err
= init_crypto_hw(ac
);
3101 err
= devm_request_irq(&pdev
->dev
, irq
, artpec6_crypto_irq
, 0,
3102 "artpec6-crypto", ac
);
3106 artpec6_crypto_dev
= &pdev
->dev
;
3108 err
= crypto_register_ahashes(hash_algos
, ARRAY_SIZE(hash_algos
));
3110 dev_err(dev
, "Failed to register ahashes\n");
3114 if (variant
!= ARTPEC6_CRYPTO
) {
3115 err
= crypto_register_ahashes(artpec7_hash_algos
,
3116 ARRAY_SIZE(artpec7_hash_algos
));
3118 dev_err(dev
, "Failed to register ahashes\n");
3119 goto unregister_ahashes
;
3123 err
= crypto_register_skciphers(crypto_algos
, ARRAY_SIZE(crypto_algos
));
3125 dev_err(dev
, "Failed to register ciphers\n");
3126 goto unregister_a7_ahashes
;
3129 err
= crypto_register_aeads(aead_algos
, ARRAY_SIZE(aead_algos
));
3131 dev_err(dev
, "Failed to register aeads\n");
3132 goto unregister_algs
;
3138 crypto_unregister_skciphers(crypto_algos
, ARRAY_SIZE(crypto_algos
));
3139 unregister_a7_ahashes
:
3140 if (variant
!= ARTPEC6_CRYPTO
)
3141 crypto_unregister_ahashes(artpec7_hash_algos
,
3142 ARRAY_SIZE(artpec7_hash_algos
));
3144 crypto_unregister_ahashes(hash_algos
, ARRAY_SIZE(hash_algos
));
3146 artpec6_crypto_disable_hw(ac
);
3148 kmem_cache_destroy(ac
->dma_cache
);
3152 static int artpec6_crypto_remove(struct platform_device
*pdev
)
3154 struct artpec6_crypto
*ac
= platform_get_drvdata(pdev
);
3155 int irq
= platform_get_irq(pdev
, 0);
3157 crypto_unregister_ahashes(hash_algos
, ARRAY_SIZE(hash_algos
));
3158 if (ac
->variant
!= ARTPEC6_CRYPTO
)
3159 crypto_unregister_ahashes(artpec7_hash_algos
,
3160 ARRAY_SIZE(artpec7_hash_algos
));
3161 crypto_unregister_skciphers(crypto_algos
, ARRAY_SIZE(crypto_algos
));
3162 crypto_unregister_aeads(aead_algos
, ARRAY_SIZE(aead_algos
));
3164 tasklet_disable(&ac
->task
);
3165 devm_free_irq(&pdev
->dev
, irq
, ac
);
3166 tasklet_kill(&ac
->task
);
3167 del_timer_sync(&ac
->timer
);
3169 artpec6_crypto_disable_hw(ac
);
3171 kmem_cache_destroy(ac
->dma_cache
);
3172 #ifdef CONFIG_DEBUG_FS
3173 artpec6_crypto_free_debugfs();
3178 static struct platform_driver artpec6_crypto_driver
= {
3179 .probe
= artpec6_crypto_probe
,
3180 .remove
= artpec6_crypto_remove
,
3182 .name
= "artpec6-crypto",
3183 .owner
= THIS_MODULE
,
3184 .of_match_table
= artpec6_crypto_of_match
,
3188 module_platform_driver(artpec6_crypto_driver
);
3190 MODULE_AUTHOR("Axis Communications AB");
3191 MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
3192 MODULE_LICENSE("GPL");