/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 * Copyright 2016 NXP
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
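
/*
 * A minimal sketch (illustration only, not part of the driver) of how such
 * a job descriptor is laid out with the desc_constr.h helpers used
 * throughout this file; the addresses and length are hypothetical
 * placeholders, and the driver appends the SEQ pointers in in/out order
 * inside init_aead_job() and friends below.
 */
#if 0
static void example_job_desc(u32 *desc, dma_addr_t shdesc_dma, int shdesc_len,
			     dma_addr_t src_dma, dma_addr_t dst_dma, u32 len)
{
	/* Header + ShareDesc pointer */
	init_job_desc_shared(desc, shdesc_dma, shdesc_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_IN_PTR: input buffer and length */
	append_seq_in_ptr(desc, src_dma, len, 0);
	/* SEQ_OUT_PTR: output buffer and length */
	append_seq_out_ptr(desc, dst_dma, len, 0);
}
#endif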

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
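
/*
 * Worked example of the arithmetic above, assuming the usual constants
 * (CAAM_CMD_SZ is the size of one 32-bit descriptor command and the h/w
 * buffer holds 64 such words, i.e. CAAM_DESC_BYTES_MAX = 256 bytes):
 * once DESC_JOB_IO_LEN bytes are reserved for the job descriptor's
 * header and SEQ pointer commands, DESC_MAX_USED_LEN is simply the
 * number of 32-bit words left for a shared descriptor.
 */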

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

#ifdef DEBUG
#include <linux/highmem.h>

static void dbg_dump_sg(const char *level, const char *prefix_str,
			int prefix_type, int rowsize, int groupsize,
			struct scatterlist *sg, size_t tlen, bool ascii)
{
	struct scatterlist *it;
	void *it_page;
	size_t len;
	void *buf;

	/* advance with sg_next(it), not sg_next(sg), so the walk progresses */
	for (it = sg; it != NULL && tlen > 0; it = sg_next(it)) {
		/*
		 * make sure the scatterlist's page
		 * has a valid virtual memory mapping
		 */
		it_page = kmap_atomic(sg_page(it));
		if (unlikely(!it_page)) {
			printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
			return;
		}

		buf = it_page + it->offset;
		len = min_t(size_t, tlen, it->length);
		print_hex_dump(level, prefix_str, prefix_type, rowsize,
			       groupsize, buf, len, ascii);
		tlen -= len;

		kunmap_atomic(it_page);
	}
}
#endif

static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};
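
/*
 * For the authenc algorithms, ctx->key holds the split authentication key
 * padded to ctx->adata.keylen_pad bytes, immediately followed by the
 * encryption key (ctx->cdata.keylen bytes); for rfc3686 the last
 * CTR_RFC3686_NONCE_SIZE bytes of the encryption key material carry the
 * nonce (see aead_setkey() and the nonce computation in aead_set_sh_desc()).
 */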

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
			ctx->adata.keylen_pad;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 ctx1_iv_off = 0;
	u32 *desc, *nonce = NULL;
	u32 inl_mask;
	unsigned int data_len[2];
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->cdata.keylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

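	/*
	 * desc_inline_query() reports through inl_mask which of the two key
	 * lengths in data_len[] can be inlined into the shared descriptor:
	 * bit 0 stands for the split authentication key and bit 1 for the
	 * encryption key. Keys that do not fit are instead referenced by
	 * their DMA address, as tested below.
	 */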
	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize,
			       is_rfc3686, nonce, ctx1_iv_off);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, alg->caam.geniv, is_rfc3686,
			       nonce, ctx1_iv_off);
	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
				  ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
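
/*
 * Illustrative sketch (not driver code): how a kernel-side user reaches the
 * setkey/setauthsize entry points above through the generic AEAD API. The
 * algorithm name is one this driver registers; the key blob contents are
 * hypothetical and must follow the authenc() format noted at aead_setkey()
 * below.
 */
#if 0
static int example_aead_session(const u8 *key, unsigned int keylen)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* dispatches to aead_setkey() via the registered aead_alg */
	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		/* dispatches to aead_setauthsize() */
		ret = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);

	crypto_free_aead(tfm);
	return ret;
}
#endif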

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append the encryption key after the auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
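
/*
 * The key blob parsed by crypto_authenc_extractkeys() above follows the
 * generic authenc() convention: an rtattr-encoded
 * struct crypto_authenc_key_param carrying enckeylen, followed by the
 * authentication key and then the encryption key.
 */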

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}
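
/*
 * Example: a 20-byte rfc4106 key is a 16-byte AES-128 key followed by the
 * 4-byte salt, so ctx->cdata.keylen above ends up as 16 while the salt
 * stays at the tail of ctx->key for init_gcm_job() to append.
 */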

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

	memcpy(ctx->key, key, keylen);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);
	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;
	cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
					ctx1_iv_off);
	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	return 0;
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;

	/* xts_ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	/* xts_ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

	return 0;
}
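
/*
 * XTS consumes two same-size AES keys (one for the data, one for the
 * tweak), hence the keylen check above only accepts 2 * AES_MIN_KEY_SIZE
 * (32 bytes, XTS-AES-128) or 2 * AES_MAX_KEY_SIZE (64 bytes, XTS-AES-256).
 */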

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);

	/* REG3 = assoclen */
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}
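
/*
 * For the job built above, the sec4_sg link table holds the source S/G
 * entries first (sec4_sg_index advances past src_nents) and, when req->dst
 * differs from req->src, the destination entries after them; LDST_SGF in
 * the SEQ pointer options tells CAAM the address refers to such a table
 * rather than to a flat buffer.
 */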

static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == 12);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	printk(KERN_ERR "asked=%d, nbytes=%d\n",
	       (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
	dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents + 1;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req,
				    bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (!edesc->src_nents) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	if (iv_contig) {
		dst_dma = edesc->iv_dma;
		out_options = 0;
	} else {
		dst_dma = edesc->sec4_sg_dma +
			  sec4_sg_index * sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
}
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	int sgc;
	bool all_contig = true;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_count(req->dst,
				     req->assoclen + req->cryptlen +
					(encrypt ? authsize : (-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			return ERR_PTR(dst_nents);
		}
	} else {
		src_nents = sg_count(req->src,
				     req->assoclen + req->cryptlen +
					(encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			return ERR_PTR(src_nents);
		}
	}

	/* Check if data are contiguous. */
	all_contig = !src_nents;
	if (!all_contig)
		sec4_sg_len = src_nents;

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}

		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
				     DMA_TO_DEVICE);
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}
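
/*
 * The allocation above packs everything contiguously:
 * [ struct aead_edesc | h/w job descriptor (desc_bytes) | sec4_sg table ],
 * which is why edesc->sec4_sg is derived by pointer arithmetic rather
 * than allocated separately.
 */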

static int gcm_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_encrypt(req);
}
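
/*
 * For rfc4106 as used by IPsec ESP, the associated data starts with the
 * 8-byte SPI + sequence number (12 bytes with extended sequence numbers),
 * so anything shorter than 8 bytes cannot be a valid request.
 */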

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_decrypt(req);
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

#ifdef DEBUG
	dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    req->assoclen + req->cryptlen, 1);
#endif

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (req->dst != req->src) {
		dst_nents = sg_count(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
				     DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
1739
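/*
 * Encrypt entry point: build a job descriptor referencing the per-tfm
 * encrypt shared descriptor, enqueue it on the job ring, and let
 * ablkcipher_encrypt_done() complete the request asynchronously.
 */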
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

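/*
 * Decrypt mirrors the encrypt path, but points the job descriptor at the
 * decrypt shared descriptor and completes via ablkcipher_decrypt_done().
 */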
static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor
 * for ablkcipher givencrypt
 */
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
				struct skcipher_givcrypt_request *greq,
				int desc_bytes,
				bool *iv_contig_out)
{
	struct ablkcipher_request *req = &greq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = src_nents;
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = sg_count(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
				     DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

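	/*
	 * Givencrypt writes the generated IV in front of the ciphertext,
	 * so contiguity is tested against the destination here - the
	 * mirror image of the check on the source in the plain encrypt
	 * path above.
	 */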
	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
		iv_contig = true;
	else
		dst_nents = dst_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

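	/*
	 * Givencrypt link table layout: source entries first, then the IV
	 * entry immediately followed by the destination entries, so the
	 * generated IV can be returned directly ahead of the ciphertext.
	 */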
	sec4_sg_index = 0;
	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}

	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig = false;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
					   CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
				edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

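/*
 * Legacy (ablkcipher/givcipher) algorithms are still described by a
 * template table and converted to crypto_alg instances at module init;
 * the AEAD algorithms further below register directly through the newer
 * aead_alg interface instead.
 */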
#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
};

static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};

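/*
 * AEAD algorithms register through the aead_alg API; the .caam member
 * carries the CAAM-specific class1/class2 operation types plus the
 * geniv and rfc3686 flags consumed when the shared descriptors are built.
 */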
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = gcm_encrypt,
			.decrypt = gcm_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
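	/* triple-DES (des3_ede) variants of the authenc templates above */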
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
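	/* single-DES variants of the authenc templates above */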
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
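	/* AES-CTR (RFC3686) variants; the seqiv wrappers generate the IV */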
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(md5),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha1),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
};

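/*
 * Wrapper tying a generated crypto_alg to the CAAM parameters it was
 * built from; instances are kept on alg_list so they can be
 * unregistered and freed at module exit.
 */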
struct caam_crypto_alg {
	struct crypto_alg crypto_alg;
	struct list_head entry;
	struct caam_alg_entry caam;
};

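/*
 * Common tfm init: grab a job ring for this transform and seed the
 * descriptor header templates from the per-algorithm CAAM parameters.
 */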
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	return 0;
}

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		 container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

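/*
 * Common tfm teardown: release the DMA mappings created earlier for the
 * shared descriptors and the key material (e.g. by the setkey paths),
 * then return the job ring.
 */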
static void caam_exit_common(struct caam_ctx *ctx)
{
	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->cdata.keylen + ctx->adata.keylen_pad,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;

	return t_alg;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

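/*
 * Module init: locate the CAAM controller node, read out which CHA
 * blocks (DES, AES, MD) this device instantiates, and register only the
 * algorithms from both tables that the hardware can actually run.
 */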
8e8ec596
KP
3544static int __init caam_algapi_init(void)
3545{
35af6403
RG
3546 struct device_node *dev_node;
3547 struct platform_device *pdev;
3548 struct device *ctrldev;
bf83490e 3549 struct caam_drv_private *priv;
8e8ec596 3550 int i = 0, err = 0;
bf83490e
VM
3551 u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
3552 unsigned int md_limit = SHA512_DIGEST_SIZE;
f2147b88 3553 bool registered = false;
8e8ec596 3554
35af6403
RG
3555 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
3556 if (!dev_node) {
3557 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
3558 if (!dev_node)
3559 return -ENODEV;
3560 }

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

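	/*
	 * To illustrate the extraction above, assuming (as an example,
	 * not quoting regs.h) that the DES instantiation count sits in
	 * bits 7:4 of cha_num_ls:
	 *
	 *	des_inst = (0x0121 & 0xf0) >> 4;	==> 2 DES CHAs
	 *
	 * A nonzero *_inst count means that accelerator block exists, so
	 * the corresponding algorithms may be registered below.
	 */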
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Skip AES modes (XTS) that are not available on
		 * low-power (LP) devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
			     OP_ALG_AAI_XTS)
				continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Skip AES algorithms (GCM) that are not available on
		 * low-power (LP) devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if (alg_aai == OP_ALG_AAI_GCM)
				continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}
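
/*
 * Example (userspace, abbreviated): once this module loads, the
 * offloaded implementations show up in /proc/crypto under their
 * CAAM driver names, e.g.
 *
 *	$ grep caam /proc/crypto
 *	driver       : cbc-aes-caam
 *	driver       : gcm-aes-caam
 *	...
 *
 * The exact list depends on which CHA blocks the probe above detected.
 */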

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");