/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "desc_constr.h"
#include "sg_sw_sec4.h"

#define CAAM_CRA_PRIORITY		3000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

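/*
 * Worked example of the inline-key budget these lengths feed into
 * (illustrative figures, assuming CAAM_CMD_SZ == 4 and the 64-word,
 * i.e. 256-byte, descriptor buffer behind CAAM_DESC_BYTES_MAX): a GCM
 * encrypt shared descriptor costs DESC_GCM_ENC_LEN == 19 * CAAM_CMD_SZ
 * == 76 bytes, so even after adding GCM_DESC_JOB_IO_LEN and a 32-byte
 * AES-256 key it stays under CAAM_DESC_BYTES_MAX, and the key can be
 * inlined as immediate data instead of being referenced via
 * ctx->key_dma. The keys_fit_inline checks below all follow this
 * pattern.
 */
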
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

#ifdef DEBUG
#include <linux/highmem.h>

static void dbg_dump_sg(const char *level, const char *prefix_str,
			int prefix_type, int rowsize, int groupsize,
			struct scatterlist *sg, size_t tlen, bool ascii,
			bool may_sleep)
{
	struct scatterlist *it;
	void *it_page;
	size_t len;
	void *buf;

	for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
		/*
		 * make sure the scatterlist's page
		 * has a valid virtual memory mapping
		 */
		it_page = kmap_atomic(sg_page(it));
		if (unlikely(!it_page)) {
			printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
			return;
		}

		buf = it_page + it->offset;
		len = min(tlen, it->length);
		print_hex_dump(level, prefix_str, prefix_type, rowsize,
			       groupsize, buf, len, ascii);
		tlen -= len;

		kunmap_atomic(it_page);
	}
}
#endif

static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

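/*
 * On entry with the SHRD condition set (the shared descriptor was
 * already run by a previous job), the AES key register presumably
 * already holds the key in decryption form, so the jump below selects
 * the OP_ALG_AAI_DK ("decrypt key") variant of the operation; on a
 * fresh run, the plain decrypt operation executes and an unconditional
 * jump skips over the DK variant.
 */
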
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

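/*
 * Both variable sequence length registers above are loaded with
 * SEQINLEN + 0, so the FIFO LOAD/STORE pair moves the whole remaining
 * input sequence through class 1 and writes the same number of bytes
 * back out - the request is processed in one pass with no explicit
 * length encoded in the descriptor.
 */
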
/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
				enckeylen);
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}

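/*
 * For reference, the ctx->key layout consumed above, as assembled by
 * aead_setkey() further down:
 *
 *	| MDHA split key, padded to split_key_pad_len |
 *	| encryption key (enckeylen bytes)            |
 *	| RFC3686 nonce (rfc3686 algorithms only)     |
 */
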
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}

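/*
 * Several descriptors below work around SEC revisions that lack the
 * MOVE_LEN opcode by self-patching: roughly, a first MOVE
 * (read_move_cmd) copies part of the descriptor into a MATH register
 * and a second MOVE (write_move_cmd) later writes it back over the
 * descriptor buffer, so the final INFIFO->OUTFIFO move executes with a
 * length computed at run time rather than a compile-time constant.
 */
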
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	if (alg->caam.geniv)
		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
	else
		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	if (alg->caam.geniv) {
		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
				LDST_SRCDST_BYTE_CONTEXT |
				(ctx1_iv_off << LDST_OFFSET_SHIFT));
		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Not need to reload iv */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

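/*
 * The GCM descriptors below must cope with three run-time shapes of a
 * request: assoclen and cryptlen both zero, assoclen-only, and the
 * general case. Since a shared descriptor is a fixed program, this is
 * handled with conditional JUMPs on the math-zero flag
 * (zero_assoc_jump_cmd*, zero_payload_jump_cmd) rather than with
 * separate descriptors per shape.
 */
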
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

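/*
 * RFC4106 is GCM as used in IPsec ESP: the 8-byte explicit IV sits
 * between the associated data and the payload in the input sequence,
 * but it must not be hashed as AAD - hence, in the descriptors below,
 * the math command that knocks 8 bytes off the AAD length and the
 * FIFOLD_CLASS_SKIP load that steps over the IV.
 */
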
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop assoclen + cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

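/*
 * "Split key" here is the MDHA IPAD/OPAD split key: the HMAC inner and
 * outer pad hash states precomputed from the raw auth key. That is why
 * split_key_len is twice the digest size picked from mdpadlen[] in
 * aead_setkey() below, and why the result is loaded with
 * KEY_DEST_MDHA_SPLIT | KEY_ENC in the shared descriptors above.
 */
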
static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret) {
		goto badkey;
	}

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;
	u8 *nonce;
	u32 geniv;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load Nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX |
		    (crt->ivsize << MOVE_LEN_SHIFT) |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV to memory */
	append_seq_store(desc, crt->ivsize,
			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
			 (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	if (ctx1_iv_off)
		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
			    (1 << JUMP_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

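/*
 * XTS-AES notes for the setkey below: the supplied key is the
 * concatenation of two AES keys of equal size (hence the
 * 2 * AES_MIN_KEY_SIZE / 2 * AES_MAX_KEY_SIZE check), the sector size
 * is fixed at 512 bytes and loaded at context offset 0x28, and only
 * the upper 8 bytes of the IV are used as the sector index (loaded at
 * offset 0x20) while the lower 8 bytes are skipped.
 */
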
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *key_jump_cmd, *desc;
	__be64 sector_size = cpu_to_be64(512);

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* xts_ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 keys only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);

	/* Load sector size with index 40 bytes (0x28) */
	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
	append_data(desc, (void *)&sector_size, 8);

	set_jump_tgt_here(desc, key_jump_cmd);

	/*
	 * create sequence for loading the sector index
	 * Upper 8B of IV - will be used as sector index
	 * Lower 8B of IV - will be discarded
	 */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
			 OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* xts_ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);

	/* Load sector size with index 40 bytes (0x28) */
	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
	append_data(desc, (void *)&sector_size, 8);

	set_jump_tgt_here(desc, key_jump_cmd);

	/*
	 * create sequence for loading the sector index
	 * Upper 8B of IV - will be used as sector index
	 * Lower 8B of IV - will be discarded
	 */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Load operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	return 0;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

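/*
 * When a request operates in place (req->src == req->dst), the
 * scatterlist was mapped once, bidirectionally, so it must also be
 * unmapped exactly once - hence the dst != src split below.
 */
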
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
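/*
 * Commentary (sketch, not in the original source): an ICV mismatch is a
 * normal runtime event for AEAD decryption, so it is folded into the
 * request status as -EBADMSG rather than logged. A caller's completion
 * callback (my_done_cb is a placeholder name) would see:
 *
 *	static void my_done_cb(struct crypto_async_request *areq, int err)
 *	{
 *		if (err == -EBADMSG)
 *			;	(tag check failed: discard the message)
 *	}
 */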
static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}
static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}
/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);

	/* REG3 = assoclen */
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}
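/*
 * Commentary (not in the original source): the input sequence above always
 * covers assoclen + cryptlen bytes. The output sequence is authsize bytes
 * longer on encryption, because the ICV is appended, and authsize bytes
 * shorter on decryption, because req->cryptlen then includes the ICV that
 * the hardware consumes while checking it.
 */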
static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == 12);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->enckeylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}
static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}
/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	printk(KERN_ERR "asked=%d, nbytes%d\n",
	       (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
	dbg_dump_sg(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents + 1;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req,
				    bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	dbg_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (!edesc->src_nents) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	if (iv_contig) {
		dst_dma = edesc->iv_dma;
		out_options = 0;
	} else {
		dst_dma = edesc->sec4_sg_dma +
			  sec4_sg_index * sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
}
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	int sgc;
	bool all_contig = true;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
		dst_nents = sg_count(req->dst,
				     req->assoclen + req->cryptlen +
					(encrypt ? authsize : (-authsize)));
	} else {
		src_nents = sg_count(req->src,
				     req->assoclen + req->cryptlen +
					(encrypt ? authsize : 0));
	}

	/* Check if data are contiguous. */
	all_contig = !src_nents;
	if (!all_contig) {
		src_nents = src_nents ? : 1;
		sec4_sg_len = src_nents;
	}

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}

		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
				     DMA_TO_DEVICE);
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}
static int gcm_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
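/*
 * Usage sketch (added commentary, with placeholder identifiers my_cb,
 * my_ctx, src_sg, dst_sg and iv; error handling omitted): once this driver
 * has registered "gcm(aes)", a kernel caller reaches gcm_encrypt() through
 * the generic AEAD API:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req;
 *
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, 0, my_cb, my_ctx);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);	(usually returns -EINPROGRESS)
 */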
static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_encrypt(req);
}
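/*
 * Commentary (not in the original source): RFC4106 ESP associated data is
 * the 4-byte SPI plus a 4- or 8-byte sequence number, so anything shorter
 * than 8 bytes cannot be a valid IPsec header; ipsec_gcm_decrypt() below
 * applies the same check.
 */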
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_decrypt(req);
}
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

#ifdef DEBUG
	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
	dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    req->assoclen + req->cryptlen, 1, may_sleep);
#endif

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
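/*
 * Commentary (not in the original source): when the IV is not contiguous
 * with the source, the table built above is laid out as
 *
 *	sec4_sg[0]             IV entry (dma_to_sec4_sg_one)
 *	sec4_sg[1..src_nents]  source segments
 *	sec4_sg[...]           destination segments, if req->src != req->dst
 *
 * which is exactly the indexing init_ablkcipher_job() assumes when it
 * advances sec4_sg_index by src_nents + 1 on the input side.
 */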
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the ablkcipher extended descriptor
 * for ablkcipher givencrypt
 */
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
				struct skcipher_givcrypt_request *greq,
				int desc_bytes,
				bool *iv_contig_out)
{
	struct ablkcipher_request *req = &greq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
		iv_contig = true;
	else
		dst_nents = dst_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}

	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
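/*
 * Commentary (not in the original source): the givencrypt variant differs
 * only in which side the IV entry joins. greq->giv is written by the
 * device together with the ciphertext, so contiguity is checked against
 * the destination, and when a table is needed the IV entry is placed just
 * before the destination segments rather than before the source.
 */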
static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
					   CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
				edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};
static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
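/*
 * Usage sketch (added commentary, with placeholder identifiers my_cb, sg
 * and iv; error handling omitted): the templates above are instantiated as
 * ablkcipher algorithms, so e.g. "cbc-aes-caam" is reached through the
 * legacy ablkcipher API of this kernel generation:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req;
 *
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, 0, my_cb, NULL);
 *	ablkcipher_request_set_crypt(req, sg, sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);	(typically returns -EINPROGRESS)
 */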
3028 static struct caam_aead_alg driver_aeads
[] = {
3032 .cra_name
= "rfc4106(gcm(aes))",
3033 .cra_driver_name
= "rfc4106-gcm-aes-caam",
3036 .setkey
= rfc4106_setkey
,
3037 .setauthsize
= rfc4106_setauthsize
,
3038 .encrypt
= ipsec_gcm_encrypt
,
3039 .decrypt
= ipsec_gcm_decrypt
,
3041 .maxauthsize
= AES_BLOCK_SIZE
,
3044 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_GCM
,
3050 .cra_name
= "rfc4543(gcm(aes))",
3051 .cra_driver_name
= "rfc4543-gcm-aes-caam",
3054 .setkey
= rfc4543_setkey
,
3055 .setauthsize
= rfc4543_setauthsize
,
3056 .encrypt
= ipsec_gcm_encrypt
,
3057 .decrypt
= ipsec_gcm_decrypt
,
3059 .maxauthsize
= AES_BLOCK_SIZE
,
3062 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_GCM
,
3065 /* Galois Counter Mode */
3069 .cra_name
= "gcm(aes)",
3070 .cra_driver_name
= "gcm-aes-caam",
3073 .setkey
= gcm_setkey
,
3074 .setauthsize
= gcm_setauthsize
,
3075 .encrypt
= gcm_encrypt
,
3076 .decrypt
= gcm_decrypt
,
3078 .maxauthsize
= AES_BLOCK_SIZE
,
3081 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_GCM
,
3084 /* single-pass ipsec_esp descriptor */
3088 .cra_name
= "authenc(hmac(md5),"
3089 "ecb(cipher_null))",
3090 .cra_driver_name
= "authenc-hmac-md5-"
3091 "ecb-cipher_null-caam",
3092 .cra_blocksize
= NULL_BLOCK_SIZE
,
3094 .setkey
= aead_setkey
,
3095 .setauthsize
= aead_setauthsize
,
3096 .encrypt
= aead_encrypt
,
3097 .decrypt
= aead_decrypt
,
3098 .ivsize
= NULL_IV_SIZE
,
3099 .maxauthsize
= MD5_DIGEST_SIZE
,
3102 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
3103 OP_ALG_AAI_HMAC_PRECOMP
,
3104 .alg_op
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC
,
3110 .cra_name
= "authenc(hmac(sha1),"
3111 "ecb(cipher_null))",
3112 .cra_driver_name
= "authenc-hmac-sha1-"
3113 "ecb-cipher_null-caam",
3114 .cra_blocksize
= NULL_BLOCK_SIZE
,
3116 .setkey
= aead_setkey
,
3117 .setauthsize
= aead_setauthsize
,
3118 .encrypt
= aead_encrypt
,
3119 .decrypt
= aead_decrypt
,
3120 .ivsize
= NULL_IV_SIZE
,
3121 .maxauthsize
= SHA1_DIGEST_SIZE
,
3124 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
3125 OP_ALG_AAI_HMAC_PRECOMP
,
3126 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
3132 .cra_name
= "authenc(hmac(sha224),"
3133 "ecb(cipher_null))",
3134 .cra_driver_name
= "authenc-hmac-sha224-"
3135 "ecb-cipher_null-caam",
3136 .cra_blocksize
= NULL_BLOCK_SIZE
,
3138 .setkey
= aead_setkey
,
3139 .setauthsize
= aead_setauthsize
,
3140 .encrypt
= aead_encrypt
,
3141 .decrypt
= aead_decrypt
,
3142 .ivsize
= NULL_IV_SIZE
,
3143 .maxauthsize
= SHA224_DIGEST_SIZE
,
3146 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
3147 OP_ALG_AAI_HMAC_PRECOMP
,
3148 .alg_op
= OP_ALG_ALGSEL_SHA224
| OP_ALG_AAI_HMAC
,
3154 .cra_name
= "authenc(hmac(sha256),"
3155 "ecb(cipher_null))",
3156 .cra_driver_name
= "authenc-hmac-sha256-"
3157 "ecb-cipher_null-caam",
3158 .cra_blocksize
= NULL_BLOCK_SIZE
,
3160 .setkey
= aead_setkey
,
3161 .setauthsize
= aead_setauthsize
,
3162 .encrypt
= aead_encrypt
,
3163 .decrypt
= aead_decrypt
,
3164 .ivsize
= NULL_IV_SIZE
,
3165 .maxauthsize
= SHA256_DIGEST_SIZE
,
3168 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
3169 OP_ALG_AAI_HMAC_PRECOMP
,
3170 .alg_op
= OP_ALG_ALGSEL_SHA256
| OP_ALG_AAI_HMAC
,
3176 .cra_name
= "authenc(hmac(sha384),"
3177 "ecb(cipher_null))",
3178 .cra_driver_name
= "authenc-hmac-sha384-"
3179 "ecb-cipher_null-caam",
3180 .cra_blocksize
= NULL_BLOCK_SIZE
,
3182 .setkey
= aead_setkey
,
3183 .setauthsize
= aead_setauthsize
,
3184 .encrypt
= aead_encrypt
,
3185 .decrypt
= aead_decrypt
,
3186 .ivsize
= NULL_IV_SIZE
,
3187 .maxauthsize
= SHA384_DIGEST_SIZE
,
3190 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
3191 OP_ALG_AAI_HMAC_PRECOMP
,
3192 .alg_op
= OP_ALG_ALGSEL_SHA384
| OP_ALG_AAI_HMAC
,
3198 .cra_name
= "authenc(hmac(sha512),"
3199 "ecb(cipher_null))",
3200 .cra_driver_name
= "authenc-hmac-sha512-"
3201 "ecb-cipher_null-caam",
3202 .cra_blocksize
= NULL_BLOCK_SIZE
,
3204 .setkey
= aead_setkey
,
3205 .setauthsize
= aead_setauthsize
,
3206 .encrypt
= aead_encrypt
,
3207 .decrypt
= aead_decrypt
,
3208 .ivsize
= NULL_IV_SIZE
,
3209 .maxauthsize
= SHA512_DIGEST_SIZE
,
3212 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
3213 OP_ALG_AAI_HMAC_PRECOMP
,
3214 .alg_op
= OP_ALG_ALGSEL_SHA512
| OP_ALG_AAI_HMAC
,
3220 .cra_name
= "authenc(hmac(md5),cbc(aes))",
3221 .cra_driver_name
= "authenc-hmac-md5-"
3223 .cra_blocksize
= AES_BLOCK_SIZE
,
3225 .setkey
= aead_setkey
,
3226 .setauthsize
= aead_setauthsize
,
3227 .encrypt
= aead_encrypt
,
3228 .decrypt
= aead_decrypt
,
3229 .ivsize
= AES_BLOCK_SIZE
,
3230 .maxauthsize
= MD5_DIGEST_SIZE
,
3233 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
3234 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
3235 OP_ALG_AAI_HMAC_PRECOMP
,
3236 .alg_op
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC
,
3242 .cra_name
= "echainiv(authenc(hmac(md5),"
3244 .cra_driver_name
= "echainiv-authenc-hmac-md5-"
3246 .cra_blocksize
= AES_BLOCK_SIZE
,
3248 .setkey
= aead_setkey
,
3249 .setauthsize
= aead_setauthsize
,
3250 .encrypt
= aead_encrypt
,
3251 .decrypt
= aead_decrypt
,
3252 .ivsize
= AES_BLOCK_SIZE
,
3253 .maxauthsize
= MD5_DIGEST_SIZE
,
3256 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
3257 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
3258 OP_ALG_AAI_HMAC_PRECOMP
,
3259 .alg_op
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC
,
3266 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
3267 .cra_driver_name
= "authenc-hmac-sha1-"
3269 .cra_blocksize
= AES_BLOCK_SIZE
,
3271 .setkey
= aead_setkey
,
3272 .setauthsize
= aead_setauthsize
,
3273 .encrypt
= aead_encrypt
,
3274 .decrypt
= aead_decrypt
,
3275 .ivsize
= AES_BLOCK_SIZE
,
3276 .maxauthsize
= SHA1_DIGEST_SIZE
,
3279 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
3280 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
3281 OP_ALG_AAI_HMAC_PRECOMP
,
3282 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
3288 .cra_name
= "echainiv(authenc(hmac(sha1),"
3290 .cra_driver_name
= "echainiv-authenc-"
3291 "hmac-sha1-cbc-aes-caam",
3292 .cra_blocksize
= AES_BLOCK_SIZE
,
3294 .setkey
= aead_setkey
,
3295 .setauthsize
= aead_setauthsize
,
3296 .encrypt
= aead_encrypt
,
3297 .decrypt
= aead_decrypt
,
3298 .ivsize
= AES_BLOCK_SIZE
,
3299 .maxauthsize
= SHA1_DIGEST_SIZE
,
3302 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
3303 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
3304 OP_ALG_AAI_HMAC_PRECOMP
,
3305 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
3312 .cra_name
= "authenc(hmac(sha224),cbc(aes))",
3313 .cra_driver_name
= "authenc-hmac-sha224-"
3315 .cra_blocksize
= AES_BLOCK_SIZE
,
3317 .setkey
= aead_setkey
,
3318 .setauthsize
= aead_setauthsize
,
3319 .encrypt
= aead_encrypt
,
3320 .decrypt
= aead_decrypt
,
3321 .ivsize
= AES_BLOCK_SIZE
,
3322 .maxauthsize
= SHA224_DIGEST_SIZE
,
3325 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
3326 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
3327 OP_ALG_AAI_HMAC_PRECOMP
,
3328 .alg_op
= OP_ALG_ALGSEL_SHA224
| OP_ALG_AAI_HMAC
,
3334 .cra_name
= "echainiv(authenc(hmac(sha224),"
3336 .cra_driver_name
= "echainiv-authenc-"
3337 "hmac-sha224-cbc-aes-caam",
3338 .cra_blocksize
= AES_BLOCK_SIZE
,
3340 .setkey
= aead_setkey
,
3341 .setauthsize
= aead_setauthsize
,
3342 .encrypt
= aead_encrypt
,
3343 .decrypt
= aead_decrypt
,
3344 .ivsize
= AES_BLOCK_SIZE
,
3345 .maxauthsize
= SHA224_DIGEST_SIZE
,
3348 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
3349 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
3350 OP_ALG_AAI_HMAC_PRECOMP
,
3351 .alg_op
= OP_ALG_ALGSEL_SHA224
| OP_ALG_AAI_HMAC
,
3358 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
3359 .cra_driver_name
= "authenc-hmac-sha256-"
3361 .cra_blocksize
= AES_BLOCK_SIZE
,
3363 .setkey
= aead_setkey
,
3364 .setauthsize
= aead_setauthsize
,
3365 .encrypt
= aead_encrypt
,
3366 .decrypt
= aead_decrypt
,
3367 .ivsize
= AES_BLOCK_SIZE
,
3368 .maxauthsize
= SHA256_DIGEST_SIZE
,
3371 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
3372 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
3373 OP_ALG_AAI_HMAC_PRECOMP
,
3374 .alg_op
= OP_ALG_ALGSEL_SHA256
| OP_ALG_AAI_HMAC
,
3380 .cra_name
= "echainiv(authenc(hmac(sha256),"
3382 .cra_driver_name
= "echainiv-authenc-"
3383 "hmac-sha256-cbc-aes-caam",
3384 .cra_blocksize
= AES_BLOCK_SIZE
,
3386 .setkey
= aead_setkey
,
3387 .setauthsize
= aead_setauthsize
,
3388 .encrypt
= aead_encrypt
,
3389 .decrypt
= aead_decrypt
,
3390 .ivsize
= AES_BLOCK_SIZE
,
3391 .maxauthsize
= SHA256_DIGEST_SIZE
,
3394 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
3395 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
3396 OP_ALG_AAI_HMAC_PRECOMP
,
3397 .alg_op
= OP_ALG_ALGSEL_SHA256
| OP_ALG_AAI_HMAC
,
3404 .cra_name
= "authenc(hmac(sha384),cbc(aes))",
3405 .cra_driver_name
= "authenc-hmac-sha384-"
3407 .cra_blocksize
= AES_BLOCK_SIZE
,
3409 .setkey
= aead_setkey
,
3410 .setauthsize
= aead_setauthsize
,
3411 .encrypt
= aead_encrypt
,
3412 .decrypt
= aead_decrypt
,
3413 .ivsize
= AES_BLOCK_SIZE
,
3414 .maxauthsize
= SHA384_DIGEST_SIZE
,
3417 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
3418 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
3419 OP_ALG_AAI_HMAC_PRECOMP
,
3420 .alg_op
= OP_ALG_ALGSEL_SHA384
| OP_ALG_AAI_HMAC
,
3426 .cra_name
= "echainiv(authenc(hmac(sha384),"
3428 .cra_driver_name
= "echainiv-authenc-"
3429 "hmac-sha384-cbc-aes-caam",
3430 .cra_blocksize
= AES_BLOCK_SIZE
,
3432 .setkey
= aead_setkey
,
3433 .setauthsize
= aead_setauthsize
,
3434 .encrypt
= aead_encrypt
,
3435 .decrypt
= aead_decrypt
,
3436 .ivsize
= AES_BLOCK_SIZE
,
3437 .maxauthsize
= SHA384_DIGEST_SIZE
,
3440 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
3441 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
3442 OP_ALG_AAI_HMAC_PRECOMP
,
3443 .alg_op
= OP_ALG_ALGSEL_SHA384
| OP_ALG_AAI_HMAC
,
3450 .cra_name
= "authenc(hmac(sha512),cbc(aes))",
3451 .cra_driver_name
= "authenc-hmac-sha512-"
3453 .cra_blocksize
= AES_BLOCK_SIZE
,
3455 .setkey
= aead_setkey
,
3456 .setauthsize
= aead_setauthsize
,
3457 .encrypt
= aead_encrypt
,
3458 .decrypt
= aead_decrypt
,
3459 .ivsize
= AES_BLOCK_SIZE
,
3460 .maxauthsize
= SHA512_DIGEST_SIZE
,
3463 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
3464 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
3465 OP_ALG_AAI_HMAC_PRECOMP
,
3466 .alg_op
= OP_ALG_ALGSEL_SHA512
| OP_ALG_AAI_HMAC
,
3472 .cra_name
= "echainiv(authenc(hmac(sha512),"
3474 .cra_driver_name
= "echainiv-authenc-"
3475 "hmac-sha512-cbc-aes-caam",
3476 .cra_blocksize
= AES_BLOCK_SIZE
,
3478 .setkey
= aead_setkey
,
3479 .setauthsize
= aead_setauthsize
,
3480 .encrypt
= aead_encrypt
,
3481 .decrypt
= aead_decrypt
,
3482 .ivsize
= AES_BLOCK_SIZE
,
3483 .maxauthsize
= SHA512_DIGEST_SIZE
,
3486 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
3487 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
3488 OP_ALG_AAI_HMAC_PRECOMP
,
3489 .alg_op
= OP_ALG_ALGSEL_SHA512
| OP_ALG_AAI_HMAC
,
3496 .cra_name
= "authenc(hmac(md5),cbc(des3_ede))",
3497 .cra_driver_name
= "authenc-hmac-md5-"
3498 "cbc-des3_ede-caam",
3499 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
3501 .setkey
= aead_setkey
,
3502 .setauthsize
= aead_setauthsize
,
3503 .encrypt
= aead_encrypt
,
3504 .decrypt
= aead_decrypt
,
3505 .ivsize
= DES3_EDE_BLOCK_SIZE
,
3506 .maxauthsize
= MD5_DIGEST_SIZE
,
3509 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
3510 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
3511 OP_ALG_AAI_HMAC_PRECOMP
,
3512 .alg_op
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC
,
3518 .cra_name
= "echainiv(authenc(hmac(md5),"
3520 .cra_driver_name
= "echainiv-authenc-hmac-md5-"
3521 "cbc-des3_ede-caam",
3522 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
3524 .setkey
= aead_setkey
,
3525 .setauthsize
= aead_setauthsize
,
3526 .encrypt
= aead_encrypt
,
3527 .decrypt
= aead_decrypt
,
3528 .ivsize
= DES3_EDE_BLOCK_SIZE
,
3529 .maxauthsize
= MD5_DIGEST_SIZE
,
3532 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
3533 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
3534 OP_ALG_AAI_HMAC_PRECOMP
,
3535 .alg_op
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC
,
3542 .cra_name
= "authenc(hmac(sha1),"
3544 .cra_driver_name
= "authenc-hmac-sha1-"
3545 "cbc-des3_ede-caam",
3546 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
3548 .setkey
= aead_setkey
,
3549 .setauthsize
= aead_setauthsize
,
3550 .encrypt
= aead_encrypt
,
3551 .decrypt
= aead_decrypt
,
3552 .ivsize
= DES3_EDE_BLOCK_SIZE
,
3553 .maxauthsize
= SHA1_DIGEST_SIZE
,
3556 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
3557 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
3558 OP_ALG_AAI_HMAC_PRECOMP
,
3559 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
3565 .cra_name
= "echainiv(authenc(hmac(sha1),"
3567 .cra_driver_name
= "echainiv-authenc-"
3569 "cbc-des3_ede-caam",
3570 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
3572 .setkey
= aead_setkey
,
3573 .setauthsize
= aead_setauthsize
,
3574 .encrypt
= aead_encrypt
,
3575 .decrypt
= aead_decrypt
,
3576 .ivsize
= DES3_EDE_BLOCK_SIZE
,
3577 .maxauthsize
= SHA1_DIGEST_SIZE
,
3580 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
3581 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
3582 OP_ALG_AAI_HMAC_PRECOMP
,
3583 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
3590 .cra_name
= "authenc(hmac(sha224),"
3592 .cra_driver_name
= "authenc-hmac-sha224-"
3593 "cbc-des3_ede-caam",
3594 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
3596 .setkey
= aead_setkey
,
3597 .setauthsize
= aead_setauthsize
,
3598 .encrypt
= aead_encrypt
,
3599 .decrypt
= aead_decrypt
,
3600 .ivsize
= DES3_EDE_BLOCK_SIZE
,
3601 .maxauthsize
= SHA224_DIGEST_SIZE
,
3604 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
3605 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
3606 OP_ALG_AAI_HMAC_PRECOMP
,
3607 .alg_op
= OP_ALG_ALGSEL_SHA224
| OP_ALG_AAI_HMAC
,
3613 .cra_name
= "echainiv(authenc(hmac(sha224),"
3615 .cra_driver_name
= "echainiv-authenc-"
3617 "cbc-des3_ede-caam",
3618 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
3620 .setkey
= aead_setkey
,
3621 .setauthsize
= aead_setauthsize
,
3622 .encrypt
= aead_encrypt
,
3623 .decrypt
= aead_decrypt
,
3624 .ivsize
= DES3_EDE_BLOCK_SIZE
,
3625 .maxauthsize
= SHA224_DIGEST_SIZE
,
3628 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
3629 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
3630 OP_ALG_AAI_HMAC_PRECOMP
,
3631 .alg_op
= OP_ALG_ALGSEL_SHA224
| OP_ALG_AAI_HMAC
,
3638 .cra_name
= "authenc(hmac(sha256),"
3640 .cra_driver_name
= "authenc-hmac-sha256-"
3641 "cbc-des3_ede-caam",
3642 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
3644 .setkey
= aead_setkey
,
3645 .setauthsize
= aead_setauthsize
,
3646 .encrypt
= aead_encrypt
,
3647 .decrypt
= aead_decrypt
,
3648 .ivsize
= DES3_EDE_BLOCK_SIZE
,
3649 .maxauthsize
= SHA256_DIGEST_SIZE
,
3652 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
3653 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
3654 OP_ALG_AAI_HMAC_PRECOMP
,
3655 .alg_op
= OP_ALG_ALGSEL_SHA256
| OP_ALG_AAI_HMAC
,
3661 .cra_name
= "echainiv(authenc(hmac(sha256),"
3663 .cra_driver_name
= "echainiv-authenc-"
3665 "cbc-des3_ede-caam",
3666 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
3668 .setkey
= aead_setkey
,
3669 .setauthsize
= aead_setauthsize
,
3670 .encrypt
= aead_encrypt
,
3671 .decrypt
= aead_decrypt
,
3672 .ivsize
= DES3_EDE_BLOCK_SIZE
,
3673 .maxauthsize
= SHA256_DIGEST_SIZE
,
3676 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
3677 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
3678 OP_ALG_AAI_HMAC_PRECOMP
,
3679 .alg_op
= OP_ALG_ALGSEL_SHA256
| OP_ALG_AAI_HMAC
,
3686 .cra_name
= "authenc(hmac(sha384),"
3688 .cra_driver_name
= "authenc-hmac-sha384-"
3689 "cbc-des3_ede-caam",
3690 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
3692 .setkey
= aead_setkey
,
3693 .setauthsize
= aead_setauthsize
,
3694 .encrypt
= aead_encrypt
,
3695 .decrypt
= aead_decrypt
,
3696 .ivsize
= DES3_EDE_BLOCK_SIZE
,
3697 .maxauthsize
= SHA384_DIGEST_SIZE
,
3700 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
3701 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
3702 OP_ALG_AAI_HMAC_PRECOMP
,
3703 .alg_op
= OP_ALG_ALGSEL_SHA384
| OP_ALG_AAI_HMAC
,
3709 .cra_name
= "echainiv(authenc(hmac(sha384),"
3711 .cra_driver_name
= "echainiv-authenc-"
3713 "cbc-des3_ede-caam",
3714 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
3716 .setkey
= aead_setkey
,
3717 .setauthsize
= aead_setauthsize
,
3718 .encrypt
= aead_encrypt
,
3719 .decrypt
= aead_decrypt
,
3720 .ivsize
= DES3_EDE_BLOCK_SIZE
,
3721 .maxauthsize
= SHA384_DIGEST_SIZE
,
3724 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
3725 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
3726 OP_ALG_AAI_HMAC_PRECOMP
,
3727 .alg_op
= OP_ALG_ALGSEL_SHA384
| OP_ALG_AAI_HMAC
,
3734 .cra_name
= "authenc(hmac(sha512),"
3736 .cra_driver_name
= "authenc-hmac-sha512-"
3737 "cbc-des3_ede-caam",
3738 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
3740 .setkey
= aead_setkey
,
3741 .setauthsize
= aead_setauthsize
,
3742 .encrypt
= aead_encrypt
,
3743 .decrypt
= aead_decrypt
,
3744 .ivsize
= DES3_EDE_BLOCK_SIZE
,
3745 .maxauthsize
= SHA512_DIGEST_SIZE
,
3748 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
3749 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
3750 OP_ALG_AAI_HMAC_PRECOMP
,
3751 .alg_op
= OP_ALG_ALGSEL_SHA512
| OP_ALG_AAI_HMAC
,
3757 .cra_name
= "echainiv(authenc(hmac(sha512),"
3759 .cra_driver_name
= "echainiv-authenc-"
3761 "cbc-des3_ede-caam",
3762 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
3764 .setkey
= aead_setkey
,
3765 .setauthsize
= aead_setauthsize
,
3766 .encrypt
= aead_encrypt
,
3767 .decrypt
= aead_decrypt
,
3768 .ivsize
= DES3_EDE_BLOCK_SIZE
,
3769 .maxauthsize
= SHA512_DIGEST_SIZE
,
3772 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
3773 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
3774 OP_ALG_AAI_HMAC_PRECOMP
,
3775 .alg_op
= OP_ALG_ALGSEL_SHA512
| OP_ALG_AAI_HMAC
,
3782 .cra_name
= "authenc(hmac(md5),cbc(des))",
3783 .cra_driver_name
= "authenc-hmac-md5-"
3785 .cra_blocksize
= DES_BLOCK_SIZE
,
3787 .setkey
= aead_setkey
,
3788 .setauthsize
= aead_setauthsize
,
3789 .encrypt
= aead_encrypt
,
3790 .decrypt
= aead_decrypt
,
3791 .ivsize
= DES_BLOCK_SIZE
,
3792 .maxauthsize
= MD5_DIGEST_SIZE
,
3795 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
3796 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
3797 OP_ALG_AAI_HMAC_PRECOMP
,
3798 .alg_op
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC
,
3804 .cra_name
= "echainiv(authenc(hmac(md5),"
3806 .cra_driver_name
= "echainiv-authenc-hmac-md5-"
3808 .cra_blocksize
= DES_BLOCK_SIZE
,
3810 .setkey
= aead_setkey
,
3811 .setauthsize
= aead_setauthsize
,
3812 .encrypt
= aead_encrypt
,
3813 .decrypt
= aead_decrypt
,
3814 .ivsize
= DES_BLOCK_SIZE
,
3815 .maxauthsize
= MD5_DIGEST_SIZE
,
3818 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
3819 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
3820 OP_ALG_AAI_HMAC_PRECOMP
,
3821 .alg_op
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC
,
3828 .cra_name
= "authenc(hmac(sha1),cbc(des))",
3829 .cra_driver_name
= "authenc-hmac-sha1-"
3831 .cra_blocksize
= DES_BLOCK_SIZE
,
3833 .setkey
= aead_setkey
,
3834 .setauthsize
= aead_setauthsize
,
3835 .encrypt
= aead_encrypt
,
3836 .decrypt
= aead_decrypt
,
3837 .ivsize
= DES_BLOCK_SIZE
,
3838 .maxauthsize
= SHA1_DIGEST_SIZE
,
3841 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
3842 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
3843 OP_ALG_AAI_HMAC_PRECOMP
,
3844 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
3850 .cra_name
= "echainiv(authenc(hmac(sha1),"
3852 .cra_driver_name
= "echainiv-authenc-"
3853 "hmac-sha1-cbc-des-caam",
3854 .cra_blocksize
= DES_BLOCK_SIZE
,
3856 .setkey
= aead_setkey
,
3857 .setauthsize
= aead_setauthsize
,
3858 .encrypt
= aead_encrypt
,
3859 .decrypt
= aead_decrypt
,
3860 .ivsize
= DES_BLOCK_SIZE
,
3861 .maxauthsize
= SHA1_DIGEST_SIZE
,
3864 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
3865 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
3866 OP_ALG_AAI_HMAC_PRECOMP
,
3867 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
3874 .cra_name
= "authenc(hmac(sha224),cbc(des))",
3875 .cra_driver_name
= "authenc-hmac-sha224-"
3877 .cra_blocksize
= DES_BLOCK_SIZE
,
3879 .setkey
= aead_setkey
,
3880 .setauthsize
= aead_setauthsize
,
3881 .encrypt
= aead_encrypt
,
3882 .decrypt
= aead_decrypt
,
3883 .ivsize
= DES_BLOCK_SIZE
,
3884 .maxauthsize
= SHA224_DIGEST_SIZE
,
3887 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
3888 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
3889 OP_ALG_AAI_HMAC_PRECOMP
,
3890 .alg_op
= OP_ALG_ALGSEL_SHA224
| OP_ALG_AAI_HMAC
,
3896 .cra_name
= "echainiv(authenc(hmac(sha224),"
3898 .cra_driver_name
= "echainiv-authenc-"
3899 "hmac-sha224-cbc-des-caam",
3900 .cra_blocksize
= DES_BLOCK_SIZE
,
3902 .setkey
= aead_setkey
,
3903 .setauthsize
= aead_setauthsize
,
3904 .encrypt
= aead_encrypt
,
3905 .decrypt
= aead_decrypt
,
3906 .ivsize
= DES_BLOCK_SIZE
,
3907 .maxauthsize
= SHA224_DIGEST_SIZE
,
3910 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
3911 .class2_alg_type
= OP_ALG_ALGSEL_SHA224
|
3912 OP_ALG_AAI_HMAC_PRECOMP
,
3913 .alg_op
= OP_ALG_ALGSEL_SHA224
| OP_ALG_AAI_HMAC
,
3920 .cra_name
= "authenc(hmac(sha256),cbc(des))",
3921 .cra_driver_name
= "authenc-hmac-sha256-"
3923 .cra_blocksize
= DES_BLOCK_SIZE
,
3925 .setkey
= aead_setkey
,
3926 .setauthsize
= aead_setauthsize
,
3927 .encrypt
= aead_encrypt
,
3928 .decrypt
= aead_decrypt
,
3929 .ivsize
= DES_BLOCK_SIZE
,
3930 .maxauthsize
= SHA256_DIGEST_SIZE
,
3933 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
3934 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
3935 OP_ALG_AAI_HMAC_PRECOMP
,
3936 .alg_op
= OP_ALG_ALGSEL_SHA256
| OP_ALG_AAI_HMAC
,
3942 .cra_name
= "echainiv(authenc(hmac(sha256),"
3944 .cra_driver_name
= "echainiv-authenc-"
3945 "hmac-sha256-cbc-des-caam",
3946 .cra_blocksize
= DES_BLOCK_SIZE
,
3948 .setkey
= aead_setkey
,
3949 .setauthsize
= aead_setauthsize
,
3950 .encrypt
= aead_encrypt
,
3951 .decrypt
= aead_decrypt
,
3952 .ivsize
= DES_BLOCK_SIZE
,
3953 .maxauthsize
= SHA256_DIGEST_SIZE
,
3956 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
3957 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
3958 OP_ALG_AAI_HMAC_PRECOMP
,
3959 .alg_op
= OP_ALG_ALGSEL_SHA256
| OP_ALG_AAI_HMAC
,
3966 .cra_name
= "authenc(hmac(sha384),cbc(des))",
3967 .cra_driver_name
= "authenc-hmac-sha384-"
3969 .cra_blocksize
= DES_BLOCK_SIZE
,
3971 .setkey
= aead_setkey
,
3972 .setauthsize
= aead_setauthsize
,
3973 .encrypt
= aead_encrypt
,
3974 .decrypt
= aead_decrypt
,
3975 .ivsize
= DES_BLOCK_SIZE
,
3976 .maxauthsize
= SHA384_DIGEST_SIZE
,
3979 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
3980 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
3981 OP_ALG_AAI_HMAC_PRECOMP
,
3982 .alg_op
= OP_ALG_ALGSEL_SHA384
| OP_ALG_AAI_HMAC
,
3988 .cra_name
= "echainiv(authenc(hmac(sha384),"
3990 .cra_driver_name
= "echainiv-authenc-"
3991 "hmac-sha384-cbc-des-caam",
3992 .cra_blocksize
= DES_BLOCK_SIZE
,
3994 .setkey
= aead_setkey
,
3995 .setauthsize
= aead_setauthsize
,
3996 .encrypt
= aead_encrypt
,
3997 .decrypt
= aead_decrypt
,
3998 .ivsize
= DES_BLOCK_SIZE
,
3999 .maxauthsize
= SHA384_DIGEST_SIZE
,
4002 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
4003 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
4004 OP_ALG_AAI_HMAC_PRECOMP
,
4005 .alg_op
= OP_ALG_ALGSEL_SHA384
| OP_ALG_AAI_HMAC
,
4012 .cra_name
= "authenc(hmac(sha512),cbc(des))",
4013 .cra_driver_name
= "authenc-hmac-sha512-"
4015 .cra_blocksize
= DES_BLOCK_SIZE
,
4017 .setkey
= aead_setkey
,
4018 .setauthsize
= aead_setauthsize
,
4019 .encrypt
= aead_encrypt
,
4020 .decrypt
= aead_decrypt
,
4021 .ivsize
= DES_BLOCK_SIZE
,
4022 .maxauthsize
= SHA512_DIGEST_SIZE
,
4025 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
4026 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
4027 OP_ALG_AAI_HMAC_PRECOMP
,
4028 .alg_op
= OP_ALG_ALGSEL_SHA512
| OP_ALG_AAI_HMAC
,
4034 .cra_name
= "echainiv(authenc(hmac(sha512),"
4036 .cra_driver_name
= "echainiv-authenc-"
4037 "hmac-sha512-cbc-des-caam",
4038 .cra_blocksize
= DES_BLOCK_SIZE
,
4040 .setkey
= aead_setkey
,
4041 .setauthsize
= aead_setauthsize
,
4042 .encrypt
= aead_encrypt
,
4043 .decrypt
= aead_decrypt
,
4044 .ivsize
= DES_BLOCK_SIZE
,
4045 .maxauthsize
= SHA512_DIGEST_SIZE
,
4048 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
4049 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
4050 OP_ALG_AAI_HMAC_PRECOMP
,
4051 .alg_op
= OP_ALG_ALGSEL_SHA512
| OP_ALG_AAI_HMAC
,
4058 .cra_name
= "authenc(hmac(md5),"
4059 "rfc3686(ctr(aes)))",
4060 .cra_driver_name
= "authenc-hmac-md5-"
4061 "rfc3686-ctr-aes-caam",
4064 .setkey
= aead_setkey
,
4065 .setauthsize
= aead_setauthsize
,
4066 .encrypt
= aead_encrypt
,
4067 .decrypt
= aead_decrypt
,
4068 .ivsize
= CTR_RFC3686_IV_SIZE
,
4069 .maxauthsize
= MD5_DIGEST_SIZE
,
4072 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
4073 OP_ALG_AAI_CTR_MOD128
,
4074 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
4075 OP_ALG_AAI_HMAC_PRECOMP
,
4076 .alg_op
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC
,
4083 .cra_name
= "seqiv(authenc("
4084 "hmac(md5),rfc3686(ctr(aes))))",
4085 .cra_driver_name
= "seqiv-authenc-hmac-md5-"
4086 "rfc3686-ctr-aes-caam",
4089 .setkey
= aead_setkey
,
4090 .setauthsize
= aead_setauthsize
,
4091 .encrypt
= aead_encrypt
,
4092 .decrypt
= aead_decrypt
,
4093 .ivsize
= CTR_RFC3686_IV_SIZE
,
4094 .maxauthsize
= MD5_DIGEST_SIZE
,
4097 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
4098 OP_ALG_AAI_CTR_MOD128
,
4099 .class2_alg_type
= OP_ALG_ALGSEL_MD5
|
4100 OP_ALG_AAI_HMAC_PRECOMP
,
4101 .alg_op
= OP_ALG_ALGSEL_MD5
| OP_ALG_AAI_HMAC
,
4109 .cra_name
= "authenc(hmac(sha1),"
4110 "rfc3686(ctr(aes)))",
4111 .cra_driver_name
= "authenc-hmac-sha1-"
4112 "rfc3686-ctr-aes-caam",
4115 .setkey
= aead_setkey
,
4116 .setauthsize
= aead_setauthsize
,
4117 .encrypt
= aead_encrypt
,
4118 .decrypt
= aead_decrypt
,
4119 .ivsize
= CTR_RFC3686_IV_SIZE
,
4120 .maxauthsize
= SHA1_DIGEST_SIZE
,
4123 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
4124 OP_ALG_AAI_CTR_MOD128
,
4125 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
4126 OP_ALG_AAI_HMAC_PRECOMP
,
4127 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
4134 .cra_name
= "seqiv(authenc("
4135 "hmac(sha1),rfc3686(ctr(aes))))",
4136 .cra_driver_name
= "seqiv-authenc-hmac-sha1-"
4137 "rfc3686-ctr-aes-caam",
4140 .setkey
= aead_setkey
,
4141 .setauthsize
= aead_setauthsize
,
4142 .encrypt
= aead_encrypt
,
4143 .decrypt
= aead_decrypt
,
4144 .ivsize
= CTR_RFC3686_IV_SIZE
,
4145 .maxauthsize
= SHA1_DIGEST_SIZE
,
4148 .class1_alg_type
= OP_ALG_ALGSEL_AES
|
4149 OP_ALG_AAI_CTR_MOD128
,
4150 .class2_alg_type
= OP_ALG_ALGSEL_SHA1
|
4151 OP_ALG_AAI_HMAC_PRECOMP
,
4152 .alg_op
= OP_ALG_ALGSEL_SHA1
| OP_ALG_AAI_HMAC
,
4160 .cra_name
= "authenc(hmac(sha224),"
4161 "rfc3686(ctr(aes)))",
4162 .cra_driver_name
= "authenc-hmac-sha224-"
4163 "rfc3686-ctr-aes-caam",
4166 .setkey
= aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
			.rfc3686 = true,
			.geniv = true,
		},
	},
};

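/*
 * Example (illustrative, not part of this driver): consumers reach the
 * transforms in the tables above through the generic crypto API rather
 * than by calling into this file, e.g.:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),rfc3686(ctr(aes)))",
 *				0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_aead_setkey(tfm, key, keylen); // reaches aead_setkey()
 *
 * The crypto core prefers this driver over software implementations
 * because it registers at CAAM_CRA_PRIORITY.
 */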
struct caam_crypto_alg {
	struct crypto_alg crypto_alg;
	struct list_head entry;
	struct caam_alg_entry caam;
};

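/*
 * Common transform-init path: take a job ring for this tfm and seed the
 * context with the OPERATION-command template values from the algorithm
 * entry, for use when the shared descriptors are constructed.
 */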
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;

	return 0;
}

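/*
 * The two init hooks below differ only in how they recover the driver
 * template wrapping the generic algorithm: container_of() on the
 * crypto_alg for ablkcipher/givcipher tfms, and on the aead_alg for AEAD
 * tfms. Both delegate to caam_init_common().
 */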
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

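/*
 * Tear-down mirror of caam_init_common(): unmap whichever shared
 * descriptors and key material this context DMA-mapped during its
 * lifetime (each unmap is guarded, since setkey may never have run),
 * then release the job ring.
 */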
static void caam_exit_common(struct caam_ctx *ctx)
{
	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

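/*
 * Module unload: unregister only the AEADs that were actually registered
 * (per the ->registered flag set in caam_algapi_init()), then drain
 * alg_list, which holds the dynamically allocated ablkcipher/givcipher
 * instances.
 */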
static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

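/*
 * Instantiate a crypto_alg from a driver template: copy the names, wire
 * up the common init/exit hooks and context size, and stash the CAAM
 * operation values for caam_cra_init() to pick up.
 */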
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;
	t_alg->caam.alg_op = template->alg_op;

	return t_alg;
}

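/*
 * AEADs come from the static driver_aeads table rather than being
 * allocated, so only the fields common to every entry need to be filled
 * in before registration.
 */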
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

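/*
 * Module init: locate the CAAM controller node in the device tree, read
 * the CHA instantiation and version registers to learn which DES, AES,
 * and MD hardware blocks exist, and register only the algorithms the
 * hardware can actually run.
 */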
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if (alg_aai == OP_ALG_AAI_GCM)
				continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");