// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2018 NXP
 */

#include "compat.h"
#include "regs.h"
#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>

#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This could be added by the dpaa2-eth driver, but it
 *       would pose a problem for userspace application processing, which
 *       cannot know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
 */
static struct kmem_cache *qi_cache;

struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/**
 * caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};

static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

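	/*
	 * When an IOMMU domain is attached, addresses handed back by the
	 * hardware are I/O virtual addresses and must be translated first;
	 * otherwise they are already physical addresses.
	 */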
	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
				   iova_addr;

	return phys_to_virt(phys_addr);
}

/*
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kmalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}

/*
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is done; the call passes straight through to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}

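/*
 * A minimal usage sketch for qi_cache_zalloc()/qi_cache_free() above,
 * mirroring the allocation pattern followed by the *_edesc_alloc()
 * functions later in this file:
 *
 *	edesc = qi_cache_zalloc(GFP_DMA | flags);
 *	if (unlikely(!edesc))
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	qi_cache_free(edesc);
 */
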
static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
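	/*
	 * Each request type reserves room for a struct caam_request at the
	 * start of its request context (see the crypto_*_set_reqsize() calls
	 * in caam_cra_init_skcipher()/caam_cra_init_aead()), so the context
	 * pointer can be returned directly.
	 */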
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx(container_of(areq, struct aead_request,
						     base));
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx(ahash_request_cast(areq));
	default:
		return ERR_PTR(-EINVAL);
	}
}

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

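	/*
	 * desc_inline_query() set bit 0 of inl_mask if the (split)
	 * authentication key fits inline in the descriptor and bit 1 if
	 * the encryption key does; keys that do not fit are referenced
	 * through their DMA address instead.
	 */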
	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
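	/*
	 * ctx->key now holds the authentication key followed, at offset
	 * adata.keylen_pad, by the encryption key - the layout assumed by
	 * the inline/DMA key references set up in aead_set_sh_desc().
	 */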
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	u32 flags;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto badkey;

	flags = crypto_aead_get_flags(aead);
	err = __des3_verify_key(&flags, keys.enckey);
	if (unlikely(err)) {
		crypto_aead_set_flags(aead, flags);
		goto out;
	}

	err = aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;

badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	goto out;
}

static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
		      (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
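	/*
	 * That is: one entry for the assoclen word, one for the IV (if any),
	 * the mapped source entries, and the destination entries only when
	 * the output scatterlist has more than one segment (a single segment
	 * is addressed directly in the frame list below).
	 */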
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;
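	/*
	 * The input frame thus covers the 4-byte assoclen word, the
	 * (optional) IV and the associated + payload data, while the output
	 * grows by the ICV on encryption and shrinks by it on decryption.
	 */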

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

	return edesc;
}

static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}

static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->cdata.key_virt = key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;
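	/*
	 * rem_bytes is the descriptor buffer space that would remain for the
	 * shared descriptor if the key were inlined, once the job descriptor
	 * I/O overhead is reserved; it is compared against the shared
	 * descriptor lengths below to decide whether inlining is possible.
	 */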

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;

	if (keylen < 4)
		return -EINVAL;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;

	if (keylen < 4)
		return -EINVAL;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4543_set_sh_desc(aead);
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128) &&
			      ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
			       OP_ALG_ALGSEL_CHACHA20);
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return unlikely(des3_verify_key(skcipher, key)) ?:
	       skcipher_setkey(skcipher, key, keylen);
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		dev_err(dev, "key size mismatch\n");
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
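	/*
	 * XTS takes two concatenated AES keys of equal size (the data key
	 * and the tweak key), hence exactly twice the AES-128 or AES-256
	 * key length is accepted above.
	 */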

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);
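	/*
	 * The edesc, the S/G table and the IV all share one qi_cache buffer,
	 * with the IV placed right after the table - this is what the
	 * CAAM_QI_MEMCACHE_SIZE bound checked above guarantees room for.
	 */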

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	if (req->src == req->dst) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	} else if (mapped_dst_nents > 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	}

	return edesc;
}

static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		/*
		 * Verify that the hw auth check passed; else return -EBADMSG.
		 */
		if ((status & JRSTA_CCBERR_ERRID_MASK) ==
		    JRSTA_CCBERR_ERRID_ICVCHK)
			ecode = -EBADMSG;
		else
			ecode = -EIO;
	}

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
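	/*
	 * -EINPROGRESS means the request was accepted; -EBUSY together with
	 * CRYPTO_TFM_REQ_MAY_BACKLOG means it was backlogged and will still
	 * be processed, so resources must stay mapped. Any other return
	 * value is a real error and is cleaned up here.
	 */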
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_decrypt(req);
}

static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
				 ivsize, 0);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status)) {
		caam_qi2_strstatus(ctx->dev, status);
		ecode = -EIO;
	}

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ret;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block.
	 */
	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
				 ivsize, 0);
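	/*
	 * This copy must happen before the request is enqueued: for in-place
	 * operation (req->src == req->dst) the decryption would otherwise
	 * overwrite the last ciphertext block before it can be saved.
	 */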

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = skcipher_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{
	dma_addr_t dma_addr;
	int i;

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
		return -ENOMEM;
	}

	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
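	/*
	 * A single DMA mapping covers the whole flc[] array plus the key
	 * buffer (everything up to the flc_dma member of struct caam_ctx),
	 * which is why both flc_dma[] and key_dma are derived from the same
	 * base address.
	 */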

	return 0;
}

static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
}

static int caam_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);

	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
			     !caam_alg->caam.nodkp);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	caam_exit_common(crypto_skcipher_ctx(tfm));
}

static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "chacha20",
				.cra_driver_name = "chacha20-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = CHACHA_KEY_SIZE,
			.max_keysize = CHACHA_KEY_SIZE,
			.ivsize = CHACHA_IV_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
	},
};

static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
1685 }
1686 },
1687 /* single-pass ipsec_esp descriptor */
1688 {
1689 .aead = {
1690 .base = {
1691 .cra_name = "authenc(hmac(md5),cbc(aes))",
1692 .cra_driver_name = "authenc-hmac-md5-"
1693 "cbc-aes-caam-qi2",
1694 .cra_blocksize = AES_BLOCK_SIZE,
1695 },
1696 .setkey = aead_setkey,
1697 .setauthsize = aead_setauthsize,
1698 .encrypt = aead_encrypt,
1699 .decrypt = aead_decrypt,
1700 .ivsize = AES_BLOCK_SIZE,
1701 .maxauthsize = MD5_DIGEST_SIZE,
1702 },
1703 .caam = {
1704 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1705 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1706 OP_ALG_AAI_HMAC_PRECOMP,
1707 }
1708 },
1709 {
1710 .aead = {
1711 .base = {
1712 .cra_name = "echainiv(authenc(hmac(md5),"
1713 "cbc(aes)))",
1714 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1715 "cbc-aes-caam-qi2",
1716 .cra_blocksize = AES_BLOCK_SIZE,
1717 },
1718 .setkey = aead_setkey,
1719 .setauthsize = aead_setauthsize,
1720 .encrypt = aead_encrypt,
1721 .decrypt = aead_decrypt,
1722 .ivsize = AES_BLOCK_SIZE,
1723 .maxauthsize = MD5_DIGEST_SIZE,
1724 },
1725 .caam = {
1726 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1727 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1728 OP_ALG_AAI_HMAC_PRECOMP,
1729 .geniv = true,
1730 }
1731 },
1732 {
1733 .aead = {
1734 .base = {
1735 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1736 .cra_driver_name = "authenc-hmac-sha1-"
1737 "cbc-aes-caam-qi2",
1738 .cra_blocksize = AES_BLOCK_SIZE,
1739 },
1740 .setkey = aead_setkey,
1741 .setauthsize = aead_setauthsize,
1742 .encrypt = aead_encrypt,
1743 .decrypt = aead_decrypt,
1744 .ivsize = AES_BLOCK_SIZE,
1745 .maxauthsize = SHA1_DIGEST_SIZE,
1746 },
1747 .caam = {
1748 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1749 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1750 OP_ALG_AAI_HMAC_PRECOMP,
1751 }
1752 },
1753 {
1754 .aead = {
1755 .base = {
1756 .cra_name = "echainiv(authenc(hmac(sha1),"
1757 "cbc(aes)))",
1758 .cra_driver_name = "echainiv-authenc-"
1759 "hmac-sha1-cbc-aes-caam-qi2",
1760 .cra_blocksize = AES_BLOCK_SIZE,
1761 },
1762 .setkey = aead_setkey,
1763 .setauthsize = aead_setauthsize,
1764 .encrypt = aead_encrypt,
1765 .decrypt = aead_decrypt,
1766 .ivsize = AES_BLOCK_SIZE,
1767 .maxauthsize = SHA1_DIGEST_SIZE,
1768 },
1769 .caam = {
1770 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1771 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1772 OP_ALG_AAI_HMAC_PRECOMP,
1773 .geniv = true,
1774 },
1775 },
1776 {
1777 .aead = {
1778 .base = {
1779 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1780 .cra_driver_name = "authenc-hmac-sha224-"
1781 "cbc-aes-caam-qi2",
1782 .cra_blocksize = AES_BLOCK_SIZE,
1783 },
1784 .setkey = aead_setkey,
1785 .setauthsize = aead_setauthsize,
1786 .encrypt = aead_encrypt,
1787 .decrypt = aead_decrypt,
1788 .ivsize = AES_BLOCK_SIZE,
1789 .maxauthsize = SHA224_DIGEST_SIZE,
1790 },
1791 .caam = {
1792 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1793 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1794 OP_ALG_AAI_HMAC_PRECOMP,
1795 }
1796 },
1797 {
1798 .aead = {
1799 .base = {
1800 .cra_name = "echainiv(authenc(hmac(sha224),"
1801 "cbc(aes)))",
1802 .cra_driver_name = "echainiv-authenc-"
1803 "hmac-sha224-cbc-aes-caam-qi2",
1804 .cra_blocksize = AES_BLOCK_SIZE,
1805 },
1806 .setkey = aead_setkey,
1807 .setauthsize = aead_setauthsize,
1808 .encrypt = aead_encrypt,
1809 .decrypt = aead_decrypt,
1810 .ivsize = AES_BLOCK_SIZE,
1811 .maxauthsize = SHA224_DIGEST_SIZE,
1812 },
1813 .caam = {
1814 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1815 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1816 OP_ALG_AAI_HMAC_PRECOMP,
1817 .geniv = true,
1818 }
1819 },
1820 {
1821 .aead = {
1822 .base = {
1823 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1824 .cra_driver_name = "authenc-hmac-sha256-"
1825 "cbc-aes-caam-qi2",
1826 .cra_blocksize = AES_BLOCK_SIZE,
1827 },
1828 .setkey = aead_setkey,
1829 .setauthsize = aead_setauthsize,
1830 .encrypt = aead_encrypt,
1831 .decrypt = aead_decrypt,
1832 .ivsize = AES_BLOCK_SIZE,
1833 .maxauthsize = SHA256_DIGEST_SIZE,
1834 },
1835 .caam = {
1836 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1837 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1838 OP_ALG_AAI_HMAC_PRECOMP,
1839 }
1840 },
1841 {
1842 .aead = {
1843 .base = {
1844 .cra_name = "echainiv(authenc(hmac(sha256),"
1845 "cbc(aes)))",
1846 .cra_driver_name = "echainiv-authenc-"
1847 "hmac-sha256-cbc-aes-"
1848 "caam-qi2",
1849 .cra_blocksize = AES_BLOCK_SIZE,
1850 },
1851 .setkey = aead_setkey,
1852 .setauthsize = aead_setauthsize,
1853 .encrypt = aead_encrypt,
1854 .decrypt = aead_decrypt,
1855 .ivsize = AES_BLOCK_SIZE,
1856 .maxauthsize = SHA256_DIGEST_SIZE,
1857 },
1858 .caam = {
1859 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1860 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1861 OP_ALG_AAI_HMAC_PRECOMP,
1862 .geniv = true,
1863 }
1864 },
1865 {
1866 .aead = {
1867 .base = {
1868 .cra_name = "authenc(hmac(sha384),cbc(aes))",
1869 .cra_driver_name = "authenc-hmac-sha384-"
1870 "cbc-aes-caam-qi2",
1871 .cra_blocksize = AES_BLOCK_SIZE,
1872 },
1873 .setkey = aead_setkey,
1874 .setauthsize = aead_setauthsize,
1875 .encrypt = aead_encrypt,
1876 .decrypt = aead_decrypt,
1877 .ivsize = AES_BLOCK_SIZE,
1878 .maxauthsize = SHA384_DIGEST_SIZE,
1879 },
1880 .caam = {
1881 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1882 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1883 OP_ALG_AAI_HMAC_PRECOMP,
1884 }
1885 },
1886 {
1887 .aead = {
1888 .base = {
1889 .cra_name = "echainiv(authenc(hmac(sha384),"
1890 "cbc(aes)))",
1891 .cra_driver_name = "echainiv-authenc-"
1892 "hmac-sha384-cbc-aes-"
1893 "caam-qi2",
1894 .cra_blocksize = AES_BLOCK_SIZE,
1895 },
1896 .setkey = aead_setkey,
1897 .setauthsize = aead_setauthsize,
1898 .encrypt = aead_encrypt,
1899 .decrypt = aead_decrypt,
1900 .ivsize = AES_BLOCK_SIZE,
1901 .maxauthsize = SHA384_DIGEST_SIZE,
1902 },
1903 .caam = {
1904 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1905 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1906 OP_ALG_AAI_HMAC_PRECOMP,
1907 .geniv = true,
1908 }
1909 },
1910 {
1911 .aead = {
1912 .base = {
1913 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1914 .cra_driver_name = "authenc-hmac-sha512-"
1915 "cbc-aes-caam-qi2",
1916 .cra_blocksize = AES_BLOCK_SIZE,
1917 },
1918 .setkey = aead_setkey,
1919 .setauthsize = aead_setauthsize,
1920 .encrypt = aead_encrypt,
1921 .decrypt = aead_decrypt,
1922 .ivsize = AES_BLOCK_SIZE,
1923 .maxauthsize = SHA512_DIGEST_SIZE,
1924 },
1925 .caam = {
1926 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1927 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1928 OP_ALG_AAI_HMAC_PRECOMP,
1929 }
1930 },
1931 {
1932 .aead = {
1933 .base = {
1934 .cra_name = "echainiv(authenc(hmac(sha512),"
1935 "cbc(aes)))",
1936 .cra_driver_name = "echainiv-authenc-"
1937 "hmac-sha512-cbc-aes-"
1938 "caam-qi2",
1939 .cra_blocksize = AES_BLOCK_SIZE,
1940 },
1941 .setkey = aead_setkey,
1942 .setauthsize = aead_setauthsize,
1943 .encrypt = aead_encrypt,
1944 .decrypt = aead_decrypt,
1945 .ivsize = AES_BLOCK_SIZE,
1946 .maxauthsize = SHA512_DIGEST_SIZE,
1947 },
1948 .caam = {
1949 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1950 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1951 OP_ALG_AAI_HMAC_PRECOMP,
1952 .geniv = true,
1953 }
1954 },
1955 {
1956 .aead = {
1957 .base = {
1958 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1959 .cra_driver_name = "authenc-hmac-md5-"
1960 "cbc-des3_ede-caam-qi2",
1961 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1962 },
 1963 .setkey = des3_aead_setkey,
1964 .setauthsize = aead_setauthsize,
1965 .encrypt = aead_encrypt,
1966 .decrypt = aead_decrypt,
1967 .ivsize = DES3_EDE_BLOCK_SIZE,
1968 .maxauthsize = MD5_DIGEST_SIZE,
1969 },
1970 .caam = {
1971 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1972 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1973 OP_ALG_AAI_HMAC_PRECOMP,
1974 }
1975 },
1976 {
1977 .aead = {
1978 .base = {
1979 .cra_name = "echainiv(authenc(hmac(md5),"
1980 "cbc(des3_ede)))",
1981 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1982 "cbc-des3_ede-caam-qi2",
1983 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1984 },
 1985 .setkey = des3_aead_setkey,
1986 .setauthsize = aead_setauthsize,
1987 .encrypt = aead_encrypt,
1988 .decrypt = aead_decrypt,
1989 .ivsize = DES3_EDE_BLOCK_SIZE,
1990 .maxauthsize = MD5_DIGEST_SIZE,
1991 },
1992 .caam = {
1993 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1994 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1995 OP_ALG_AAI_HMAC_PRECOMP,
1996 .geniv = true,
1997 }
1998 },
1999 {
2000 .aead = {
2001 .base = {
2002 .cra_name = "authenc(hmac(sha1),"
2003 "cbc(des3_ede))",
2004 .cra_driver_name = "authenc-hmac-sha1-"
2005 "cbc-des3_ede-caam-qi2",
2006 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2007 },
 2008 .setkey = des3_aead_setkey,
2009 .setauthsize = aead_setauthsize,
2010 .encrypt = aead_encrypt,
2011 .decrypt = aead_decrypt,
2012 .ivsize = DES3_EDE_BLOCK_SIZE,
2013 .maxauthsize = SHA1_DIGEST_SIZE,
2014 },
2015 .caam = {
2016 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2017 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2018 OP_ALG_AAI_HMAC_PRECOMP,
2019 },
2020 },
2021 {
2022 .aead = {
2023 .base = {
2024 .cra_name = "echainiv(authenc(hmac(sha1),"
2025 "cbc(des3_ede)))",
2026 .cra_driver_name = "echainiv-authenc-"
2027 "hmac-sha1-"
2028 "cbc-des3_ede-caam-qi2",
2029 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2030 },
 2031 .setkey = des3_aead_setkey,
2032 .setauthsize = aead_setauthsize,
2033 .encrypt = aead_encrypt,
2034 .decrypt = aead_decrypt,
2035 .ivsize = DES3_EDE_BLOCK_SIZE,
2036 .maxauthsize = SHA1_DIGEST_SIZE,
2037 },
2038 .caam = {
2039 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2040 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2041 OP_ALG_AAI_HMAC_PRECOMP,
2042 .geniv = true,
2043 }
2044 },
2045 {
2046 .aead = {
2047 .base = {
2048 .cra_name = "authenc(hmac(sha224),"
2049 "cbc(des3_ede))",
2050 .cra_driver_name = "authenc-hmac-sha224-"
2051 "cbc-des3_ede-caam-qi2",
2052 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2053 },
 2054 .setkey = des3_aead_setkey,
2055 .setauthsize = aead_setauthsize,
2056 .encrypt = aead_encrypt,
2057 .decrypt = aead_decrypt,
2058 .ivsize = DES3_EDE_BLOCK_SIZE,
2059 .maxauthsize = SHA224_DIGEST_SIZE,
2060 },
2061 .caam = {
2062 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2063 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2064 OP_ALG_AAI_HMAC_PRECOMP,
2065 },
2066 },
2067 {
2068 .aead = {
2069 .base = {
2070 .cra_name = "echainiv(authenc(hmac(sha224),"
2071 "cbc(des3_ede)))",
2072 .cra_driver_name = "echainiv-authenc-"
2073 "hmac-sha224-"
2074 "cbc-des3_ede-caam-qi2",
2075 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2076 },
 2077 .setkey = des3_aead_setkey,
2078 .setauthsize = aead_setauthsize,
2079 .encrypt = aead_encrypt,
2080 .decrypt = aead_decrypt,
2081 .ivsize = DES3_EDE_BLOCK_SIZE,
2082 .maxauthsize = SHA224_DIGEST_SIZE,
2083 },
2084 .caam = {
2085 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2086 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2087 OP_ALG_AAI_HMAC_PRECOMP,
2088 .geniv = true,
2089 }
2090 },
2091 {
2092 .aead = {
2093 .base = {
2094 .cra_name = "authenc(hmac(sha256),"
2095 "cbc(des3_ede))",
2096 .cra_driver_name = "authenc-hmac-sha256-"
2097 "cbc-des3_ede-caam-qi2",
2098 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2099 },
 2100 .setkey = des3_aead_setkey,
2101 .setauthsize = aead_setauthsize,
2102 .encrypt = aead_encrypt,
2103 .decrypt = aead_decrypt,
2104 .ivsize = DES3_EDE_BLOCK_SIZE,
2105 .maxauthsize = SHA256_DIGEST_SIZE,
2106 },
2107 .caam = {
2108 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2109 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2110 OP_ALG_AAI_HMAC_PRECOMP,
2111 },
2112 },
2113 {
2114 .aead = {
2115 .base = {
2116 .cra_name = "echainiv(authenc(hmac(sha256),"
2117 "cbc(des3_ede)))",
2118 .cra_driver_name = "echainiv-authenc-"
2119 "hmac-sha256-"
2120 "cbc-des3_ede-caam-qi2",
2121 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2122 },
 2123 .setkey = des3_aead_setkey,
2124 .setauthsize = aead_setauthsize,
2125 .encrypt = aead_encrypt,
2126 .decrypt = aead_decrypt,
2127 .ivsize = DES3_EDE_BLOCK_SIZE,
2128 .maxauthsize = SHA256_DIGEST_SIZE,
2129 },
2130 .caam = {
2131 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2132 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2133 OP_ALG_AAI_HMAC_PRECOMP,
2134 .geniv = true,
2135 }
2136 },
2137 {
2138 .aead = {
2139 .base = {
2140 .cra_name = "authenc(hmac(sha384),"
2141 "cbc(des3_ede))",
2142 .cra_driver_name = "authenc-hmac-sha384-"
2143 "cbc-des3_ede-caam-qi2",
2144 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2145 },
 2146 .setkey = des3_aead_setkey,
2147 .setauthsize = aead_setauthsize,
2148 .encrypt = aead_encrypt,
2149 .decrypt = aead_decrypt,
2150 .ivsize = DES3_EDE_BLOCK_SIZE,
2151 .maxauthsize = SHA384_DIGEST_SIZE,
2152 },
2153 .caam = {
2154 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2155 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2156 OP_ALG_AAI_HMAC_PRECOMP,
2157 },
2158 },
2159 {
2160 .aead = {
2161 .base = {
2162 .cra_name = "echainiv(authenc(hmac(sha384),"
2163 "cbc(des3_ede)))",
2164 .cra_driver_name = "echainiv-authenc-"
2165 "hmac-sha384-"
2166 "cbc-des3_ede-caam-qi2",
2167 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2168 },
 2169 .setkey = des3_aead_setkey,
2170 .setauthsize = aead_setauthsize,
2171 .encrypt = aead_encrypt,
2172 .decrypt = aead_decrypt,
2173 .ivsize = DES3_EDE_BLOCK_SIZE,
2174 .maxauthsize = SHA384_DIGEST_SIZE,
2175 },
2176 .caam = {
2177 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2178 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2179 OP_ALG_AAI_HMAC_PRECOMP,
2180 .geniv = true,
2181 }
2182 },
2183 {
2184 .aead = {
2185 .base = {
2186 .cra_name = "authenc(hmac(sha512),"
2187 "cbc(des3_ede))",
2188 .cra_driver_name = "authenc-hmac-sha512-"
2189 "cbc-des3_ede-caam-qi2",
2190 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2191 },
 2192 .setkey = des3_aead_setkey,
2193 .setauthsize = aead_setauthsize,
2194 .encrypt = aead_encrypt,
2195 .decrypt = aead_decrypt,
2196 .ivsize = DES3_EDE_BLOCK_SIZE,
2197 .maxauthsize = SHA512_DIGEST_SIZE,
2198 },
2199 .caam = {
2200 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2201 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2202 OP_ALG_AAI_HMAC_PRECOMP,
2203 },
2204 },
2205 {
2206 .aead = {
2207 .base = {
2208 .cra_name = "echainiv(authenc(hmac(sha512),"
2209 "cbc(des3_ede)))",
2210 .cra_driver_name = "echainiv-authenc-"
2211 "hmac-sha512-"
2212 "cbc-des3_ede-caam-qi2",
2213 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2214 },
 2215 .setkey = des3_aead_setkey,
2216 .setauthsize = aead_setauthsize,
2217 .encrypt = aead_encrypt,
2218 .decrypt = aead_decrypt,
2219 .ivsize = DES3_EDE_BLOCK_SIZE,
2220 .maxauthsize = SHA512_DIGEST_SIZE,
2221 },
2222 .caam = {
2223 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2224 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2225 OP_ALG_AAI_HMAC_PRECOMP,
2226 .geniv = true,
2227 }
2228 },
2229 {
2230 .aead = {
2231 .base = {
2232 .cra_name = "authenc(hmac(md5),cbc(des))",
2233 .cra_driver_name = "authenc-hmac-md5-"
2234 "cbc-des-caam-qi2",
2235 .cra_blocksize = DES_BLOCK_SIZE,
2236 },
2237 .setkey = aead_setkey,
2238 .setauthsize = aead_setauthsize,
2239 .encrypt = aead_encrypt,
2240 .decrypt = aead_decrypt,
2241 .ivsize = DES_BLOCK_SIZE,
2242 .maxauthsize = MD5_DIGEST_SIZE,
2243 },
2244 .caam = {
2245 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2246 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2247 OP_ALG_AAI_HMAC_PRECOMP,
2248 },
2249 },
2250 {
2251 .aead = {
2252 .base = {
2253 .cra_name = "echainiv(authenc(hmac(md5),"
2254 "cbc(des)))",
2255 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2256 "cbc-des-caam-qi2",
2257 .cra_blocksize = DES_BLOCK_SIZE,
2258 },
2259 .setkey = aead_setkey,
2260 .setauthsize = aead_setauthsize,
2261 .encrypt = aead_encrypt,
2262 .decrypt = aead_decrypt,
2263 .ivsize = DES_BLOCK_SIZE,
2264 .maxauthsize = MD5_DIGEST_SIZE,
2265 },
2266 .caam = {
2267 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2268 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2269 OP_ALG_AAI_HMAC_PRECOMP,
2270 .geniv = true,
2271 }
2272 },
2273 {
2274 .aead = {
2275 .base = {
2276 .cra_name = "authenc(hmac(sha1),cbc(des))",
2277 .cra_driver_name = "authenc-hmac-sha1-"
2278 "cbc-des-caam-qi2",
2279 .cra_blocksize = DES_BLOCK_SIZE,
2280 },
2281 .setkey = aead_setkey,
2282 .setauthsize = aead_setauthsize,
2283 .encrypt = aead_encrypt,
2284 .decrypt = aead_decrypt,
2285 .ivsize = DES_BLOCK_SIZE,
2286 .maxauthsize = SHA1_DIGEST_SIZE,
2287 },
2288 .caam = {
2289 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2290 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2291 OP_ALG_AAI_HMAC_PRECOMP,
2292 },
2293 },
2294 {
2295 .aead = {
2296 .base = {
2297 .cra_name = "echainiv(authenc(hmac(sha1),"
2298 "cbc(des)))",
2299 .cra_driver_name = "echainiv-authenc-"
2300 "hmac-sha1-cbc-des-caam-qi2",
2301 .cra_blocksize = DES_BLOCK_SIZE,
2302 },
2303 .setkey = aead_setkey,
2304 .setauthsize = aead_setauthsize,
2305 .encrypt = aead_encrypt,
2306 .decrypt = aead_decrypt,
2307 .ivsize = DES_BLOCK_SIZE,
2308 .maxauthsize = SHA1_DIGEST_SIZE,
2309 },
2310 .caam = {
2311 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2312 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2313 OP_ALG_AAI_HMAC_PRECOMP,
2314 .geniv = true,
2315 }
2316 },
2317 {
2318 .aead = {
2319 .base = {
2320 .cra_name = "authenc(hmac(sha224),cbc(des))",
2321 .cra_driver_name = "authenc-hmac-sha224-"
2322 "cbc-des-caam-qi2",
2323 .cra_blocksize = DES_BLOCK_SIZE,
2324 },
2325 .setkey = aead_setkey,
2326 .setauthsize = aead_setauthsize,
2327 .encrypt = aead_encrypt,
2328 .decrypt = aead_decrypt,
2329 .ivsize = DES_BLOCK_SIZE,
2330 .maxauthsize = SHA224_DIGEST_SIZE,
2331 },
2332 .caam = {
2333 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2334 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2335 OP_ALG_AAI_HMAC_PRECOMP,
2336 },
2337 },
2338 {
2339 .aead = {
2340 .base = {
2341 .cra_name = "echainiv(authenc(hmac(sha224),"
2342 "cbc(des)))",
2343 .cra_driver_name = "echainiv-authenc-"
2344 "hmac-sha224-cbc-des-"
2345 "caam-qi2",
2346 .cra_blocksize = DES_BLOCK_SIZE,
2347 },
2348 .setkey = aead_setkey,
2349 .setauthsize = aead_setauthsize,
2350 .encrypt = aead_encrypt,
2351 .decrypt = aead_decrypt,
2352 .ivsize = DES_BLOCK_SIZE,
2353 .maxauthsize = SHA224_DIGEST_SIZE,
2354 },
2355 .caam = {
2356 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2357 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2358 OP_ALG_AAI_HMAC_PRECOMP,
2359 .geniv = true,
2360 }
2361 },
2362 {
2363 .aead = {
2364 .base = {
2365 .cra_name = "authenc(hmac(sha256),cbc(des))",
2366 .cra_driver_name = "authenc-hmac-sha256-"
2367 "cbc-des-caam-qi2",
2368 .cra_blocksize = DES_BLOCK_SIZE,
2369 },
2370 .setkey = aead_setkey,
2371 .setauthsize = aead_setauthsize,
2372 .encrypt = aead_encrypt,
2373 .decrypt = aead_decrypt,
2374 .ivsize = DES_BLOCK_SIZE,
2375 .maxauthsize = SHA256_DIGEST_SIZE,
2376 },
2377 .caam = {
2378 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2379 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2380 OP_ALG_AAI_HMAC_PRECOMP,
2381 },
2382 },
2383 {
2384 .aead = {
2385 .base = {
2386 .cra_name = "echainiv(authenc(hmac(sha256),"
2387 "cbc(des)))",
2388 .cra_driver_name = "echainiv-authenc-"
2389 "hmac-sha256-cbc-desi-"
2390 "caam-qi2",
2391 .cra_blocksize = DES_BLOCK_SIZE,
2392 },
2393 .setkey = aead_setkey,
2394 .setauthsize = aead_setauthsize,
2395 .encrypt = aead_encrypt,
2396 .decrypt = aead_decrypt,
2397 .ivsize = DES_BLOCK_SIZE,
2398 .maxauthsize = SHA256_DIGEST_SIZE,
2399 },
2400 .caam = {
2401 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2402 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2403 OP_ALG_AAI_HMAC_PRECOMP,
2404 .geniv = true,
2405 },
2406 },
2407 {
2408 .aead = {
2409 .base = {
2410 .cra_name = "authenc(hmac(sha384),cbc(des))",
2411 .cra_driver_name = "authenc-hmac-sha384-"
2412 "cbc-des-caam-qi2",
2413 .cra_blocksize = DES_BLOCK_SIZE,
2414 },
2415 .setkey = aead_setkey,
2416 .setauthsize = aead_setauthsize,
2417 .encrypt = aead_encrypt,
2418 .decrypt = aead_decrypt,
2419 .ivsize = DES_BLOCK_SIZE,
2420 .maxauthsize = SHA384_DIGEST_SIZE,
2421 },
2422 .caam = {
2423 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2424 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2425 OP_ALG_AAI_HMAC_PRECOMP,
2426 },
2427 },
2428 {
2429 .aead = {
2430 .base = {
2431 .cra_name = "echainiv(authenc(hmac(sha384),"
2432 "cbc(des)))",
2433 .cra_driver_name = "echainiv-authenc-"
2434 "hmac-sha384-cbc-des-"
2435 "caam-qi2",
2436 .cra_blocksize = DES_BLOCK_SIZE,
2437 },
2438 .setkey = aead_setkey,
2439 .setauthsize = aead_setauthsize,
2440 .encrypt = aead_encrypt,
2441 .decrypt = aead_decrypt,
2442 .ivsize = DES_BLOCK_SIZE,
2443 .maxauthsize = SHA384_DIGEST_SIZE,
2444 },
2445 .caam = {
2446 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2447 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2448 OP_ALG_AAI_HMAC_PRECOMP,
2449 .geniv = true,
2450 }
2451 },
2452 {
2453 .aead = {
2454 .base = {
2455 .cra_name = "authenc(hmac(sha512),cbc(des))",
2456 .cra_driver_name = "authenc-hmac-sha512-"
2457 "cbc-des-caam-qi2",
2458 .cra_blocksize = DES_BLOCK_SIZE,
2459 },
2460 .setkey = aead_setkey,
2461 .setauthsize = aead_setauthsize,
2462 .encrypt = aead_encrypt,
2463 .decrypt = aead_decrypt,
2464 .ivsize = DES_BLOCK_SIZE,
2465 .maxauthsize = SHA512_DIGEST_SIZE,
2466 },
2467 .caam = {
2468 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2469 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2470 OP_ALG_AAI_HMAC_PRECOMP,
2471 }
2472 },
2473 {
2474 .aead = {
2475 .base = {
2476 .cra_name = "echainiv(authenc(hmac(sha512),"
2477 "cbc(des)))",
2478 .cra_driver_name = "echainiv-authenc-"
2479 "hmac-sha512-cbc-des-"
2480 "caam-qi2",
2481 .cra_blocksize = DES_BLOCK_SIZE,
2482 },
2483 .setkey = aead_setkey,
2484 .setauthsize = aead_setauthsize,
2485 .encrypt = aead_encrypt,
2486 .decrypt = aead_decrypt,
2487 .ivsize = DES_BLOCK_SIZE,
2488 .maxauthsize = SHA512_DIGEST_SIZE,
2489 },
2490 .caam = {
2491 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2492 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2493 OP_ALG_AAI_HMAC_PRECOMP,
2494 .geniv = true,
2495 }
2496 },
2497 {
2498 .aead = {
2499 .base = {
2500 .cra_name = "authenc(hmac(md5),"
2501 "rfc3686(ctr(aes)))",
2502 .cra_driver_name = "authenc-hmac-md5-"
2503 "rfc3686-ctr-aes-caam-qi2",
2504 .cra_blocksize = 1,
2505 },
2506 .setkey = aead_setkey,
2507 .setauthsize = aead_setauthsize,
2508 .encrypt = aead_encrypt,
2509 .decrypt = aead_decrypt,
2510 .ivsize = CTR_RFC3686_IV_SIZE,
2511 .maxauthsize = MD5_DIGEST_SIZE,
2512 },
2513 .caam = {
2514 .class1_alg_type = OP_ALG_ALGSEL_AES |
2515 OP_ALG_AAI_CTR_MOD128,
2516 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2517 OP_ALG_AAI_HMAC_PRECOMP,
2518 .rfc3686 = true,
2519 },
2520 },
2521 {
2522 .aead = {
2523 .base = {
2524 .cra_name = "seqiv(authenc("
2525 "hmac(md5),rfc3686(ctr(aes))))",
2526 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2527 "rfc3686-ctr-aes-caam-qi2",
2528 .cra_blocksize = 1,
2529 },
2530 .setkey = aead_setkey,
2531 .setauthsize = aead_setauthsize,
2532 .encrypt = aead_encrypt,
2533 .decrypt = aead_decrypt,
2534 .ivsize = CTR_RFC3686_IV_SIZE,
2535 .maxauthsize = MD5_DIGEST_SIZE,
2536 },
2537 .caam = {
2538 .class1_alg_type = OP_ALG_ALGSEL_AES |
2539 OP_ALG_AAI_CTR_MOD128,
2540 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2541 OP_ALG_AAI_HMAC_PRECOMP,
2542 .rfc3686 = true,
2543 .geniv = true,
2544 },
2545 },
2546 {
2547 .aead = {
2548 .base = {
2549 .cra_name = "authenc(hmac(sha1),"
2550 "rfc3686(ctr(aes)))",
2551 .cra_driver_name = "authenc-hmac-sha1-"
2552 "rfc3686-ctr-aes-caam-qi2",
2553 .cra_blocksize = 1,
2554 },
2555 .setkey = aead_setkey,
2556 .setauthsize = aead_setauthsize,
2557 .encrypt = aead_encrypt,
2558 .decrypt = aead_decrypt,
2559 .ivsize = CTR_RFC3686_IV_SIZE,
2560 .maxauthsize = SHA1_DIGEST_SIZE,
2561 },
2562 .caam = {
2563 .class1_alg_type = OP_ALG_ALGSEL_AES |
2564 OP_ALG_AAI_CTR_MOD128,
2565 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2566 OP_ALG_AAI_HMAC_PRECOMP,
2567 .rfc3686 = true,
2568 },
2569 },
2570 {
2571 .aead = {
2572 .base = {
2573 .cra_name = "seqiv(authenc("
2574 "hmac(sha1),rfc3686(ctr(aes))))",
2575 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2576 "rfc3686-ctr-aes-caam-qi2",
2577 .cra_blocksize = 1,
2578 },
2579 .setkey = aead_setkey,
2580 .setauthsize = aead_setauthsize,
2581 .encrypt = aead_encrypt,
2582 .decrypt = aead_decrypt,
2583 .ivsize = CTR_RFC3686_IV_SIZE,
2584 .maxauthsize = SHA1_DIGEST_SIZE,
2585 },
2586 .caam = {
2587 .class1_alg_type = OP_ALG_ALGSEL_AES |
2588 OP_ALG_AAI_CTR_MOD128,
2589 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2590 OP_ALG_AAI_HMAC_PRECOMP,
2591 .rfc3686 = true,
2592 .geniv = true,
2593 },
2594 },
2595 {
2596 .aead = {
2597 .base = {
2598 .cra_name = "authenc(hmac(sha224),"
2599 "rfc3686(ctr(aes)))",
2600 .cra_driver_name = "authenc-hmac-sha224-"
2601 "rfc3686-ctr-aes-caam-qi2",
2602 .cra_blocksize = 1,
2603 },
2604 .setkey = aead_setkey,
2605 .setauthsize = aead_setauthsize,
2606 .encrypt = aead_encrypt,
2607 .decrypt = aead_decrypt,
2608 .ivsize = CTR_RFC3686_IV_SIZE,
2609 .maxauthsize = SHA224_DIGEST_SIZE,
2610 },
2611 .caam = {
2612 .class1_alg_type = OP_ALG_ALGSEL_AES |
2613 OP_ALG_AAI_CTR_MOD128,
2614 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2615 OP_ALG_AAI_HMAC_PRECOMP,
2616 .rfc3686 = true,
2617 },
2618 },
2619 {
2620 .aead = {
2621 .base = {
2622 .cra_name = "seqiv(authenc("
2623 "hmac(sha224),rfc3686(ctr(aes))))",
2624 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2625 "rfc3686-ctr-aes-caam-qi2",
2626 .cra_blocksize = 1,
2627 },
2628 .setkey = aead_setkey,
2629 .setauthsize = aead_setauthsize,
2630 .encrypt = aead_encrypt,
2631 .decrypt = aead_decrypt,
2632 .ivsize = CTR_RFC3686_IV_SIZE,
2633 .maxauthsize = SHA224_DIGEST_SIZE,
2634 },
2635 .caam = {
2636 .class1_alg_type = OP_ALG_ALGSEL_AES |
2637 OP_ALG_AAI_CTR_MOD128,
2638 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2639 OP_ALG_AAI_HMAC_PRECOMP,
2640 .rfc3686 = true,
2641 .geniv = true,
2642 },
2643 },
2644 {
2645 .aead = {
2646 .base = {
2647 .cra_name = "authenc(hmac(sha256),"
2648 "rfc3686(ctr(aes)))",
2649 .cra_driver_name = "authenc-hmac-sha256-"
2650 "rfc3686-ctr-aes-caam-qi2",
2651 .cra_blocksize = 1,
2652 },
2653 .setkey = aead_setkey,
2654 .setauthsize = aead_setauthsize,
2655 .encrypt = aead_encrypt,
2656 .decrypt = aead_decrypt,
2657 .ivsize = CTR_RFC3686_IV_SIZE,
2658 .maxauthsize = SHA256_DIGEST_SIZE,
2659 },
2660 .caam = {
2661 .class1_alg_type = OP_ALG_ALGSEL_AES |
2662 OP_ALG_AAI_CTR_MOD128,
2663 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2664 OP_ALG_AAI_HMAC_PRECOMP,
2665 .rfc3686 = true,
2666 },
2667 },
2668 {
2669 .aead = {
2670 .base = {
2671 .cra_name = "seqiv(authenc(hmac(sha256),"
2672 "rfc3686(ctr(aes))))",
2673 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2674 "rfc3686-ctr-aes-caam-qi2",
2675 .cra_blocksize = 1,
2676 },
2677 .setkey = aead_setkey,
2678 .setauthsize = aead_setauthsize,
2679 .encrypt = aead_encrypt,
2680 .decrypt = aead_decrypt,
2681 .ivsize = CTR_RFC3686_IV_SIZE,
2682 .maxauthsize = SHA256_DIGEST_SIZE,
2683 },
2684 .caam = {
2685 .class1_alg_type = OP_ALG_ALGSEL_AES |
2686 OP_ALG_AAI_CTR_MOD128,
2687 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2688 OP_ALG_AAI_HMAC_PRECOMP,
2689 .rfc3686 = true,
2690 .geniv = true,
2691 },
2692 },
2693 {
2694 .aead = {
2695 .base = {
2696 .cra_name = "authenc(hmac(sha384),"
2697 "rfc3686(ctr(aes)))",
2698 .cra_driver_name = "authenc-hmac-sha384-"
2699 "rfc3686-ctr-aes-caam-qi2",
2700 .cra_blocksize = 1,
2701 },
2702 .setkey = aead_setkey,
2703 .setauthsize = aead_setauthsize,
2704 .encrypt = aead_encrypt,
2705 .decrypt = aead_decrypt,
2706 .ivsize = CTR_RFC3686_IV_SIZE,
2707 .maxauthsize = SHA384_DIGEST_SIZE,
2708 },
2709 .caam = {
2710 .class1_alg_type = OP_ALG_ALGSEL_AES |
2711 OP_ALG_AAI_CTR_MOD128,
2712 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2713 OP_ALG_AAI_HMAC_PRECOMP,
2714 .rfc3686 = true,
2715 },
2716 },
2717 {
2718 .aead = {
2719 .base = {
2720 .cra_name = "seqiv(authenc(hmac(sha384),"
2721 "rfc3686(ctr(aes))))",
2722 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2723 "rfc3686-ctr-aes-caam-qi2",
2724 .cra_blocksize = 1,
2725 },
2726 .setkey = aead_setkey,
2727 .setauthsize = aead_setauthsize,
2728 .encrypt = aead_encrypt,
2729 .decrypt = aead_decrypt,
2730 .ivsize = CTR_RFC3686_IV_SIZE,
2731 .maxauthsize = SHA384_DIGEST_SIZE,
2732 },
2733 .caam = {
2734 .class1_alg_type = OP_ALG_ALGSEL_AES |
2735 OP_ALG_AAI_CTR_MOD128,
2736 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2737 OP_ALG_AAI_HMAC_PRECOMP,
2738 .rfc3686 = true,
2739 .geniv = true,
2740 },
2741 },
2742 {
2743 .aead = {
2744 .base = {
2745 .cra_name = "rfc7539(chacha20,poly1305)",
2746 .cra_driver_name = "rfc7539-chacha20-poly1305-"
2747 "caam-qi2",
2748 .cra_blocksize = 1,
2749 },
2750 .setkey = chachapoly_setkey,
2751 .setauthsize = chachapoly_setauthsize,
2752 .encrypt = aead_encrypt,
2753 .decrypt = aead_decrypt,
2754 .ivsize = CHACHAPOLY_IV_SIZE,
2755 .maxauthsize = POLY1305_DIGEST_SIZE,
2756 },
2757 .caam = {
2758 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2759 OP_ALG_AAI_AEAD,
2760 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2761 OP_ALG_AAI_AEAD,
 2762 .nodkp = true,
2763 },
2764 },
2765 {
2766 .aead = {
2767 .base = {
2768 .cra_name = "rfc7539esp(chacha20,poly1305)",
2769 .cra_driver_name = "rfc7539esp-chacha20-"
2770 "poly1305-caam-qi2",
2771 .cra_blocksize = 1,
2772 },
2773 .setkey = chachapoly_setkey,
2774 .setauthsize = chachapoly_setauthsize,
2775 .encrypt = aead_encrypt,
2776 .decrypt = aead_decrypt,
2777 .ivsize = 8,
2778 .maxauthsize = POLY1305_DIGEST_SIZE,
2779 },
2780 .caam = {
2781 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2782 OP_ALG_AAI_AEAD,
2783 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2784 OP_ALG_AAI_AEAD,
 2785 .nodkp = true,
2786 },
2787 },
2788 {
2789 .aead = {
2790 .base = {
2791 .cra_name = "authenc(hmac(sha512),"
2792 "rfc3686(ctr(aes)))",
2793 .cra_driver_name = "authenc-hmac-sha512-"
2794 "rfc3686-ctr-aes-caam-qi2",
2795 .cra_blocksize = 1,
2796 },
2797 .setkey = aead_setkey,
2798 .setauthsize = aead_setauthsize,
2799 .encrypt = aead_encrypt,
2800 .decrypt = aead_decrypt,
2801 .ivsize = CTR_RFC3686_IV_SIZE,
2802 .maxauthsize = SHA512_DIGEST_SIZE,
2803 },
2804 .caam = {
2805 .class1_alg_type = OP_ALG_ALGSEL_AES |
2806 OP_ALG_AAI_CTR_MOD128,
2807 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2808 OP_ALG_AAI_HMAC_PRECOMP,
2809 .rfc3686 = true,
2810 },
2811 },
2812 {
2813 .aead = {
2814 .base = {
2815 .cra_name = "seqiv(authenc(hmac(sha512),"
2816 "rfc3686(ctr(aes))))",
2817 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2818 "rfc3686-ctr-aes-caam-qi2",
2819 .cra_blocksize = 1,
2820 },
2821 .setkey = aead_setkey,
2822 .setauthsize = aead_setauthsize,
2823 .encrypt = aead_encrypt,
2824 .decrypt = aead_decrypt,
2825 .ivsize = CTR_RFC3686_IV_SIZE,
2826 .maxauthsize = SHA512_DIGEST_SIZE,
2827 },
2828 .caam = {
2829 .class1_alg_type = OP_ALG_ALGSEL_AES |
2830 OP_ALG_AAI_CTR_MOD128,
2831 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2832 OP_ALG_AAI_HMAC_PRECOMP,
2833 .rfc3686 = true,
2834 .geniv = true,
2835 },
2836 },
2837};
2838
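/*
 * Editorial sketch, not part of the driver: consumers never call into this
 * file directly; they reach one of the AEAD transforms registered above via
 * the generic crypto API. Assuming the "authenc(hmac(sha256),cbc(aes))"
 * template from the table:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, keylen);   - routed to aead_setkey()
 *	crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
 *	crypto_free_aead(tfm);
 */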
2839static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2840{
2841 struct skcipher_alg *alg = &t_alg->skcipher;
2842
2843 alg->base.cra_module = THIS_MODULE;
2844 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2845 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2846 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2847
2848 alg->init = caam_cra_init_skcipher;
2849 alg->exit = caam_cra_exit;
2850}
2851
2852static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2853{
2854 struct aead_alg *alg = &t_alg->aead;
2855
2856 alg->base.cra_module = THIS_MODULE;
2857 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2858 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2859 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2860
2861 alg->init = caam_cra_init_aead;
2862 alg->exit = caam_cra_exit_aead;
2863}
2864
2865/* max hash key is max split key size */
2866#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
2867
2868#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
2869
2870/* caam context sizes for hashes: running digest + 8 */
2871#define HASH_MSG_LEN 8
2872#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
2873
2874enum hash_optype {
2875 UPDATE = 0,
2876 UPDATE_FIRST,
2877 FINALIZE,
2878 DIGEST,
2879 HASH_NUM_OP
2880};
2881
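/*
 * Descriptive note (editorial): each optype above selects the shared
 * descriptor built in ahash_set_sh_desc() below - UPDATE (OP_ALG_AS_UPDATE)
 * continues a running hash, UPDATE_FIRST (OP_ALG_AS_INIT) starts one,
 * FINALIZE (OP_ALG_AS_FINALIZE) closes it, and DIGEST (OP_ALG_AS_INITFINAL)
 * performs a one-shot hash.
 */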
2882/**
2883 * caam_hash_ctx - ahash per-session context
2884 * @flc: Flow Contexts array
2885 * @flc_dma: I/O virtual addresses of the Flow Contexts
2886 * @dev: dpseci device
2887 * @ctx_len: size of Context Register
2888 * @adata: hashing algorithm details
2889 */
2890struct caam_hash_ctx {
2891 struct caam_flc flc[HASH_NUM_OP];
2892 dma_addr_t flc_dma[HASH_NUM_OP];
2893 struct device *dev;
2894 int ctx_len;
2895 struct alginfo adata;
2896};
2897
2898/* ahash state */
2899struct caam_hash_state {
2900 struct caam_request caam_req;
2901 dma_addr_t buf_dma;
2902 dma_addr_t ctx_dma;
 2903 int ctx_dma_len;
2904 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2905 int buflen_0;
2906 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2907 int buflen_1;
2908 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
2909 int (*update)(struct ahash_request *req);
2910 int (*final)(struct ahash_request *req);
2911 int (*finup)(struct ahash_request *req);
2912 int current_buf;
2913};
2914
2915struct caam_export_state {
2916 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
2917 u8 caam_ctx[MAX_CTX_LEN];
2918 int buflen;
2919 int (*update)(struct ahash_request *req);
2920 int (*final)(struct ahash_request *req);
2921 int (*finup)(struct ahash_request *req);
2922};
2923
2924static inline void switch_buf(struct caam_hash_state *state)
2925{
2926 state->current_buf ^= 1;
2927}
2928
2929static inline u8 *current_buf(struct caam_hash_state *state)
2930{
2931 return state->current_buf ? state->buf_1 : state->buf_0;
2932}
2933
2934static inline u8 *alt_buf(struct caam_hash_state *state)
2935{
2936 return state->current_buf ? state->buf_0 : state->buf_1;
2937}
2938
2939static inline int *current_buflen(struct caam_hash_state *state)
2940{
2941 return state->current_buf ? &state->buflen_1 : &state->buflen_0;
2942}
2943
2944static inline int *alt_buflen(struct caam_hash_state *state)
2945{
2946 return state->current_buf ? &state->buflen_0 : &state->buflen_1;
2947}
2948
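/*
 * Illustrative flow (editorial sketch, based on ahash_update_ctx() and
 * ahash_done_bi() below): an ->update() that leaves trailing bytes behind
 * stages them in the alternate buffer, then flips buffer roles once the
 * request completes:
 *
 *	scatterwalk_map_and_copy(alt_buf(state), req->src, offset, rem, 0);
 *	...
 *	switch_buf(state);   - current_buf() now returns what alt_buf() did
 */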
2949/* Map current buffer in state (if length > 0) and put it in link table */
2950static inline int buf_map_to_qm_sg(struct device *dev,
2951 struct dpaa2_sg_entry *qm_sg,
2952 struct caam_hash_state *state)
2953{
2954 int buflen = *current_buflen(state);
2955
2956 if (!buflen)
2957 return 0;
2958
2959 state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
2960 DMA_TO_DEVICE);
2961 if (dma_mapping_error(dev, state->buf_dma)) {
2962 dev_err(dev, "unable to map buf\n");
2963 state->buf_dma = 0;
2964 return -ENOMEM;
2965 }
2966
2967 dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
2968
2969 return 0;
2970}
2971
2972/* Map state->caam_ctx, and add it to link table */
2973static inline int ctx_map_to_qm_sg(struct device *dev,
2974 struct caam_hash_state *state, int ctx_len,
2975 struct dpaa2_sg_entry *qm_sg, u32 flag)
2976{
 2977 state->ctx_dma_len = ctx_len;
2978 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
2979 if (dma_mapping_error(dev, state->ctx_dma)) {
2980 dev_err(dev, "unable to map ctx\n");
2981 state->ctx_dma = 0;
2982 return -ENOMEM;
2983 }
2984
2985 dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
2986
2987 return 0;
2988}
2989
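/*
 * Editorial sketch of how the two mapping helpers above are paired when an
 * operation consumes both the running context and the buffered partial
 * block (see ahash_update_ctx() and ahash_final_ctx() below):
 *
 *	ret = ctx_map_to_qm_sg(dev, state, ctx->ctx_len, sg_table,
 *			       DMA_BIDIRECTIONAL);   - S/G entry 0: context
 *	if (!ret)
 *		ret = buf_map_to_qm_sg(dev, sg_table + 1, state);
 *						     - S/G entry 1: buffer
 */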
2990static int ahash_set_sh_desc(struct crypto_ahash *ahash)
2991{
2992 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
2993 int digestsize = crypto_ahash_digestsize(ahash);
2994 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
2995 struct caam_flc *flc;
2996 u32 *desc;
2997
2998 /* ahash_update shared descriptor */
2999 flc = &ctx->flc[UPDATE];
3000 desc = flc->sh_desc;
3001 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3002 ctx->ctx_len, true, priv->sec_attr.era);
3003 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3004 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3005 desc_bytes(desc), DMA_BIDIRECTIONAL);
3006 print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3007 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3008 1);
3009
3010 /* ahash_update_first shared descriptor */
3011 flc = &ctx->flc[UPDATE_FIRST];
3012 desc = flc->sh_desc;
3013 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3014 ctx->ctx_len, false, priv->sec_attr.era);
3015 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3016 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3017 desc_bytes(desc), DMA_BIDIRECTIONAL);
3018 print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3019 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3020 1);
3021
3022 /* ahash_final shared descriptor */
3023 flc = &ctx->flc[FINALIZE];
3024 desc = flc->sh_desc;
3025 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3026 ctx->ctx_len, true, priv->sec_attr.era);
3027 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3028 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3029 desc_bytes(desc), DMA_BIDIRECTIONAL);
3030 print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3031 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3032 1);
3033
3034 /* ahash_digest shared descriptor */
3035 flc = &ctx->flc[DIGEST];
3036 desc = flc->sh_desc;
3037 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3038 ctx->ctx_len, false, priv->sec_attr.era);
3039 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3040 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3041 desc_bytes(desc), DMA_BIDIRECTIONAL);
3042 print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3043 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3044 1);
3045
3046 return 0;
3047}
3048
3049struct split_key_sh_result {
3050 struct completion completion;
3051 int err;
3052 struct device *dev;
3053};
3054
3055static void split_key_sh_done(void *cbk_ctx, u32 err)
3056{
3057 struct split_key_sh_result *res = cbk_ctx;
3058
3059 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3060
3061 if (err)
3062 caam_qi2_strstatus(res->dev, err);
3063
3064 res->err = err;
3065 complete(&res->completion);
3066}
3067
 3068 /* Digest the key if it is too long, shrinking it to digestsize */
3069static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3070 u32 digestsize)
3071{
3072 struct caam_request *req_ctx;
3073 u32 *desc;
3074 struct split_key_sh_result result;
 3075 dma_addr_t key_dma;
3076 struct caam_flc *flc;
3077 dma_addr_t flc_dma;
3078 int ret = -ENOMEM;
3079 struct dpaa2_fl_entry *in_fle, *out_fle;
3080
3081 req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3082 if (!req_ctx)
3083 return -ENOMEM;
3084
3085 in_fle = &req_ctx->fd_flt[1];
3086 out_fle = &req_ctx->fd_flt[0];
3087
3088 flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3089 if (!flc)
3090 goto err_flc;
3091
3092 key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3093 if (dma_mapping_error(ctx->dev, key_dma)) {
3094 dev_err(ctx->dev, "unable to map key memory\n");
3095 goto err_key_dma;
3096 }
3097
3098 desc = flc->sh_desc;
3099
3100 init_sh_desc(desc, 0);
3101
3102 /* descriptor to perform unkeyed hash on key_in */
3103 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3104 OP_ALG_AS_INITFINAL);
3105 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3106 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3107 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3108 LDST_SRCDST_BYTE_CONTEXT);
3109
3110 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3111 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3112 desc_bytes(desc), DMA_TO_DEVICE);
3113 if (dma_mapping_error(ctx->dev, flc_dma)) {
3114 dev_err(ctx->dev, "unable to map shared descriptor\n");
3115 goto err_flc_dma;
3116 }
3117
3118 dpaa2_fl_set_final(in_fle, true);
3119 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
 3120 dpaa2_fl_set_addr(in_fle, key_dma);
3121 dpaa2_fl_set_len(in_fle, *keylen);
3122 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
 3123 dpaa2_fl_set_addr(out_fle, key_dma);
3124 dpaa2_fl_set_len(out_fle, digestsize);
3125
3126 print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
 3127 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3128 print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3129 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3130 1);
3131
3132 result.err = 0;
3133 init_completion(&result.completion);
3134 result.dev = ctx->dev;
3135
3136 req_ctx->flc = flc;
3137 req_ctx->flc_dma = flc_dma;
3138 req_ctx->cbk = split_key_sh_done;
3139 req_ctx->ctx = &result;
3140
3141 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3142 if (ret == -EINPROGRESS) {
3143 /* in progress */
3144 wait_for_completion(&result.completion);
3145 ret = result.err;
3146 print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
 3147 DUMP_PREFIX_ADDRESS, 16, 4, key,
3148 digestsize, 1);
3149 }
3150
3151 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3152 DMA_TO_DEVICE);
3153err_flc_dma:
3154 dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3155err_key_dma:
3156 kfree(flc);
3157err_flc:
3158 kfree(req_ctx);
3159
3160 *keylen = digestsize;
3161
3162 return ret;
3163}
3164
3165static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3166 unsigned int keylen)
3167{
3168 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3169 unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3170 unsigned int digestsize = crypto_ahash_digestsize(ahash);
3171 int ret;
3172 u8 *hashed_key = NULL;
3173
3174 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3175
3176 if (keylen > blocksize) {
 3177 hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3178 if (!hashed_key)
3179 return -ENOMEM;
 3180 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3181 if (ret)
3182 goto bad_free_key;
3183 key = hashed_key;
3184 }
3185
3186 ctx->adata.keylen = keylen;
3187 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3188 OP_ALG_ALGSEL_MASK);
3189 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3190 goto bad_free_key;
3191
3192 ctx->adata.key_virt = key;
3193 ctx->adata.key_inline = true;
3194
3195 ret = ahash_set_sh_desc(ahash);
3196 kfree(hashed_key);
3197 return ret;
3198bad_free_key:
3199 kfree(hashed_key);
3200 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
3201 return -EINVAL;
3202}
3203
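/*
 * Editorial sketch, not part of the driver: ahash_setkey() above is reached
 * through the generic ahash API, e.g. (assuming the hmac(sha256) template
 * registered by this driver):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_ahash_setkey(tfm, key, keylen);
 *		crypto_free_ahash(tfm);
 *	}
 */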
3204static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
 3205 struct ahash_request *req)
3206{
3207 struct caam_hash_state *state = ahash_request_ctx(req);
3208
3209 if (edesc->src_nents)
3210 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3211
3212 if (edesc->qm_sg_bytes)
3213 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3214 DMA_TO_DEVICE);
3215
3216 if (state->buf_dma) {
3217 dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
3218 DMA_TO_DEVICE);
3219 state->buf_dma = 0;
3220 }
3221}
3222
3223static inline void ahash_unmap_ctx(struct device *dev,
3224 struct ahash_edesc *edesc,
 3225 struct ahash_request *req, u32 flag)
 3226 {
3227 struct caam_hash_state *state = ahash_request_ctx(req);
3228
3229 if (state->ctx_dma) {
 3230 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3231 state->ctx_dma = 0;
3232 }
 3233 ahash_unmap(dev, edesc, req);
3234}
3235
3236static void ahash_done(void *cbk_ctx, u32 status)
3237{
3238 struct crypto_async_request *areq = cbk_ctx;
3239 struct ahash_request *req = ahash_request_cast(areq);
3240 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3241 struct caam_hash_state *state = ahash_request_ctx(req);
3242 struct ahash_edesc *edesc = state->caam_req.edesc;
3243 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3244 int digestsize = crypto_ahash_digestsize(ahash);
3245 int ecode = 0;
3246
3247 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3248
3249 if (unlikely(status)) {
3250 caam_qi2_strstatus(ctx->dev, status);
3251 ecode = -EIO;
3252 }
3253
3254 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3255 memcpy(req->result, state->caam_ctx, digestsize);
3256 qi_cache_free(edesc);
3257
3258 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3259 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3260 ctx->ctx_len, 1);
3261
3262 req->base.complete(&req->base, ecode);
3263}
3264
3265static void ahash_done_bi(void *cbk_ctx, u32 status)
3266{
3267 struct crypto_async_request *areq = cbk_ctx;
3268 struct ahash_request *req = ahash_request_cast(areq);
3269 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3270 struct caam_hash_state *state = ahash_request_ctx(req);
3271 struct ahash_edesc *edesc = state->caam_req.edesc;
3272 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3273 int ecode = 0;
3274
3275 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3276
3277 if (unlikely(status)) {
3278 caam_qi2_strstatus(ctx->dev, status);
3279 ecode = -EIO;
3280 }
3281
 3282 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3283 switch_buf(state);
3284 qi_cache_free(edesc);
3285
3286 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3287 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3288 ctx->ctx_len, 1);
3289 if (req->result)
3290 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3291 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3292 crypto_ahash_digestsize(ahash), 1);
3293
3294 req->base.complete(&req->base, ecode);
3295}
3296
3297static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3298{
3299 struct crypto_async_request *areq = cbk_ctx;
3300 struct ahash_request *req = ahash_request_cast(areq);
3301 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3302 struct caam_hash_state *state = ahash_request_ctx(req);
3303 struct ahash_edesc *edesc = state->caam_req.edesc;
3304 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3305 int digestsize = crypto_ahash_digestsize(ahash);
3306 int ecode = 0;
3307
3308 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3309
3310 if (unlikely(status)) {
3311 caam_qi2_strstatus(ctx->dev, status);
3312 ecode = -EIO;
3313 }
3314
3315 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3316 memcpy(req->result, state->caam_ctx, digestsize);
3317 qi_cache_free(edesc);
3318
3319 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3320 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3321 ctx->ctx_len, 1);
3322
3323 req->base.complete(&req->base, ecode);
3324}
3325
3326static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3327{
3328 struct crypto_async_request *areq = cbk_ctx;
3329 struct ahash_request *req = ahash_request_cast(areq);
3330 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3331 struct caam_hash_state *state = ahash_request_ctx(req);
3332 struct ahash_edesc *edesc = state->caam_req.edesc;
3333 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3334 int ecode = 0;
3335
3336 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3337
3338 if (unlikely(status)) {
3339 caam_qi2_strstatus(ctx->dev, status);
3340 ecode = -EIO;
3341 }
3342
 3343 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3344 switch_buf(state);
3345 qi_cache_free(edesc);
3346
3347 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3348 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3349 ctx->ctx_len, 1);
3350 if (req->result)
3351 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3352 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3353 crypto_ahash_digestsize(ahash), 1);
3354
3355 req->base.complete(&req->base, ecode);
3356}
3357
3358static int ahash_update_ctx(struct ahash_request *req)
3359{
3360 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3361 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3362 struct caam_hash_state *state = ahash_request_ctx(req);
3363 struct caam_request *req_ctx = &state->caam_req;
3364 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3365 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3366 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3367 GFP_KERNEL : GFP_ATOMIC;
3368 u8 *buf = current_buf(state);
3369 int *buflen = current_buflen(state);
3370 u8 *next_buf = alt_buf(state);
3371 int *next_buflen = alt_buflen(state), last_buflen;
3372 int in_len = *buflen + req->nbytes, to_hash;
3373 int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3374 struct ahash_edesc *edesc;
3375 int ret = 0;
3376
3377 last_buflen = *next_buflen;
3378 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3379 to_hash = in_len - *next_buflen;
3380
3381 if (to_hash) {
3382 struct dpaa2_sg_entry *sg_table;
3383
3384 src_nents = sg_nents_for_len(req->src,
3385 req->nbytes - (*next_buflen));
3386 if (src_nents < 0) {
3387 dev_err(ctx->dev, "Invalid number of src SG.\n");
3388 return src_nents;
3389 }
3390
3391 if (src_nents) {
3392 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3393 DMA_TO_DEVICE);
3394 if (!mapped_nents) {
3395 dev_err(ctx->dev, "unable to DMA map source\n");
3396 return -ENOMEM;
3397 }
3398 } else {
3399 mapped_nents = 0;
3400 }
3401
3402 /* allocate space for base edesc and link tables */
3403 edesc = qi_cache_zalloc(GFP_DMA | flags);
3404 if (!edesc) {
3405 dma_unmap_sg(ctx->dev, req->src, src_nents,
3406 DMA_TO_DEVICE);
3407 return -ENOMEM;
3408 }
3409
3410 edesc->src_nents = src_nents;
3411 qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3412 qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
3413 sizeof(*sg_table);
3414 sg_table = &edesc->sgt[0];
3415
3416 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3417 DMA_BIDIRECTIONAL);
3418 if (ret)
3419 goto unmap_ctx;
3420
3421 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3422 if (ret)
3423 goto unmap_ctx;
3424
3425 if (mapped_nents) {
3426 sg_to_qm_sg_last(req->src, mapped_nents,
3427 sg_table + qm_sg_src_index, 0);
3428 if (*next_buflen)
3429 scatterwalk_map_and_copy(next_buf, req->src,
3430 to_hash - *buflen,
3431 *next_buflen, 0);
3432 } else {
3433 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3434 true);
3435 }
3436
3437 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3438 qm_sg_bytes, DMA_TO_DEVICE);
3439 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3440 dev_err(ctx->dev, "unable to map S/G table\n");
3441 ret = -ENOMEM;
3442 goto unmap_ctx;
3443 }
3444 edesc->qm_sg_bytes = qm_sg_bytes;
3445
3446 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3447 dpaa2_fl_set_final(in_fle, true);
3448 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3449 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3450 dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3451 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3452 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3453 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3454
3455 req_ctx->flc = &ctx->flc[UPDATE];
3456 req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3457 req_ctx->cbk = ahash_done_bi;
3458 req_ctx->ctx = &req->base;
3459 req_ctx->edesc = edesc;
3460
3461 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3462 if (ret != -EINPROGRESS &&
3463 !(ret == -EBUSY &&
3464 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3465 goto unmap_ctx;
3466 } else if (*next_buflen) {
3467 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3468 req->nbytes, 0);
3469 *buflen = *next_buflen;
3470 *next_buflen = last_buflen;
3471 }
3472
3473 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3474 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3475 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3476 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3477 1);
3478
3479 return ret;
3480unmap_ctx:
 3481 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3482 qi_cache_free(edesc);
3483 return ret;
3484}
3485
3486static int ahash_final_ctx(struct ahash_request *req)
3487{
3488 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3489 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3490 struct caam_hash_state *state = ahash_request_ctx(req);
3491 struct caam_request *req_ctx = &state->caam_req;
3492 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3493 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3494 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3495 GFP_KERNEL : GFP_ATOMIC;
3496 int buflen = *current_buflen(state);
3497 int qm_sg_bytes, qm_sg_src_index;
3498 int digestsize = crypto_ahash_digestsize(ahash);
3499 struct ahash_edesc *edesc;
3500 struct dpaa2_sg_entry *sg_table;
3501 int ret;
3502
3503 /* allocate space for base edesc and link tables */
3504 edesc = qi_cache_zalloc(GFP_DMA | flags);
3505 if (!edesc)
3506 return -ENOMEM;
3507
3508 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3509 qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
3510 sg_table = &edesc->sgt[0];
3511
3512 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
 3513 DMA_BIDIRECTIONAL);
3514 if (ret)
3515 goto unmap_ctx;
3516
3517 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3518 if (ret)
3519 goto unmap_ctx;
3520
3521 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
3522
3523 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3524 DMA_TO_DEVICE);
3525 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3526 dev_err(ctx->dev, "unable to map S/G table\n");
3527 ret = -ENOMEM;
3528 goto unmap_ctx;
3529 }
3530 edesc->qm_sg_bytes = qm_sg_bytes;
3531
3532 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3533 dpaa2_fl_set_final(in_fle, true);
3534 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3535 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3536 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3537 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
 3538 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3539 dpaa2_fl_set_len(out_fle, digestsize);
3540
3541 req_ctx->flc = &ctx->flc[FINALIZE];
3542 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3543 req_ctx->cbk = ahash_done_ctx_src;
3544 req_ctx->ctx = &req->base;
3545 req_ctx->edesc = edesc;
3546
3547 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3548 if (ret == -EINPROGRESS ||
3549 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3550 return ret;
3551
3552unmap_ctx:
 3553 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3554 qi_cache_free(edesc);
3555 return ret;
3556}
3557
3558static int ahash_finup_ctx(struct ahash_request *req)
3559{
3560 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3561 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3562 struct caam_hash_state *state = ahash_request_ctx(req);
3563 struct caam_request *req_ctx = &state->caam_req;
3564 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3565 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3566 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3567 GFP_KERNEL : GFP_ATOMIC;
3568 int buflen = *current_buflen(state);
3569 int qm_sg_bytes, qm_sg_src_index;
3570 int src_nents, mapped_nents;
3571 int digestsize = crypto_ahash_digestsize(ahash);
3572 struct ahash_edesc *edesc;
3573 struct dpaa2_sg_entry *sg_table;
3574 int ret;
3575
3576 src_nents = sg_nents_for_len(req->src, req->nbytes);
3577 if (src_nents < 0) {
3578 dev_err(ctx->dev, "Invalid number of src SG.\n");
3579 return src_nents;
3580 }
3581
3582 if (src_nents) {
3583 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3584 DMA_TO_DEVICE);
3585 if (!mapped_nents) {
3586 dev_err(ctx->dev, "unable to DMA map source\n");
3587 return -ENOMEM;
3588 }
3589 } else {
3590 mapped_nents = 0;
3591 }
3592
3593 /* allocate space for base edesc and link tables */
3594 edesc = qi_cache_zalloc(GFP_DMA | flags);
3595 if (!edesc) {
3596 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3597 return -ENOMEM;
3598 }
3599
3600 edesc->src_nents = src_nents;
3601 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3602 qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
3603 sg_table = &edesc->sgt[0];
3604
3605 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
 3606 DMA_BIDIRECTIONAL);
3607 if (ret)
3608 goto unmap_ctx;
3609
3610 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3611 if (ret)
3612 goto unmap_ctx;
3613
3614 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
3615
3616 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3617 DMA_TO_DEVICE);
3618 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3619 dev_err(ctx->dev, "unable to map S/G table\n");
3620 ret = -ENOMEM;
3621 goto unmap_ctx;
3622 }
3623 edesc->qm_sg_bytes = qm_sg_bytes;
3624
3625 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3626 dpaa2_fl_set_final(in_fle, true);
3627 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3628 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3629 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3630 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
 3631 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3632 dpaa2_fl_set_len(out_fle, digestsize);
3633
3634 req_ctx->flc = &ctx->flc[FINALIZE];
3635 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3636 req_ctx->cbk = ahash_done_ctx_src;
3637 req_ctx->ctx = &req->base;
3638 req_ctx->edesc = edesc;
3639
3640 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3641 if (ret == -EINPROGRESS ||
3642 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3643 return ret;
3644
3645unmap_ctx:
 3646 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3647 qi_cache_free(edesc);
3648 return ret;
3649}
3650
3651static int ahash_digest(struct ahash_request *req)
3652{
3653 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3654 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3655 struct caam_hash_state *state = ahash_request_ctx(req);
3656 struct caam_request *req_ctx = &state->caam_req;
3657 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3658 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3659 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3660 GFP_KERNEL : GFP_ATOMIC;
3661 int digestsize = crypto_ahash_digestsize(ahash);
3662 int src_nents, mapped_nents;
3663 struct ahash_edesc *edesc;
3664 int ret = -ENOMEM;
3665
3666 state->buf_dma = 0;
3667
3668 src_nents = sg_nents_for_len(req->src, req->nbytes);
3669 if (src_nents < 0) {
3670 dev_err(ctx->dev, "Invalid number of src SG.\n");
3671 return src_nents;
3672 }
3673
3674 if (src_nents) {
3675 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3676 DMA_TO_DEVICE);
3677 if (!mapped_nents) {
3678 dev_err(ctx->dev, "unable to map source for DMA\n");
3679 return ret;
3680 }
3681 } else {
3682 mapped_nents = 0;
3683 }
3684
3685 /* allocate space for base edesc and link tables */
3686 edesc = qi_cache_zalloc(GFP_DMA | flags);
3687 if (!edesc) {
3688 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3689 return ret;
3690 }
3691
3692 edesc->src_nents = src_nents;
3693 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3694
3695 if (mapped_nents > 1) {
3696 int qm_sg_bytes;
3697 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3698
3699 qm_sg_bytes = mapped_nents * sizeof(*sg_table);
3700 sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
3701 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3702 qm_sg_bytes, DMA_TO_DEVICE);
3703 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3704 dev_err(ctx->dev, "unable to map S/G table\n");
3705 goto unmap;
3706 }
3707 edesc->qm_sg_bytes = qm_sg_bytes;
3708 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3709 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3710 } else {
3711 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3712 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3713 }
3714
3715 state->ctx_dma_len = digestsize;
3716 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
 3717 DMA_FROM_DEVICE);
3718 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3719 dev_err(ctx->dev, "unable to map ctx\n");
3720 state->ctx_dma = 0;
3721 goto unmap;
3722 }
3723
3724 dpaa2_fl_set_final(in_fle, true);
3725 dpaa2_fl_set_len(in_fle, req->nbytes);
3726 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5965dc74 3727 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3f16f6c9
HG
3728 dpaa2_fl_set_len(out_fle, digestsize);
3729
3730 req_ctx->flc = &ctx->flc[DIGEST];
3731 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3732 req_ctx->cbk = ahash_done;
3733 req_ctx->ctx = &req->base;
3734 req_ctx->edesc = edesc;
3735 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3736 if (ret == -EINPROGRESS ||
3737 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3738 return ret;
3739
3740unmap:
5965dc74 3741 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3f16f6c9
HG
3742 qi_cache_free(edesc);
3743 return ret;
3744}
3745
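/*
 * Final step when no running context exists: only the bytes buffered
 * in the caam_hash_state are digested. A zero-length input is legal
 * here - see the FMT=2'b11 note inside the function for why in_fle is
 * left zeroized in that case.
 */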
3746static int ahash_final_no_ctx(struct ahash_request *req)
3747{
3748 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3749 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3750 struct caam_hash_state *state = ahash_request_ctx(req);
3751 struct caam_request *req_ctx = &state->caam_req;
3752 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3753 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3754 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3755 GFP_KERNEL : GFP_ATOMIC;
3756 u8 *buf = current_buf(state);
3757 int buflen = *current_buflen(state);
3758 int digestsize = crypto_ahash_digestsize(ahash);
3759 struct ahash_edesc *edesc;
3760 int ret = -ENOMEM;
3761
3762 /* allocate space for base edesc and link tables */
3763 edesc = qi_cache_zalloc(GFP_DMA | flags);
3764 if (!edesc)
3765 return ret;
3766
07586d3d
HG
3767 if (buflen) {
3768 state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3769 DMA_TO_DEVICE);
3770 if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3771 dev_err(ctx->dev, "unable to map src\n");
3772 goto unmap;
3773 }
3f16f6c9
HG
3774 }
3775
5965dc74
HG
3776 state->ctx_dma_len = digestsize;
3777 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3f16f6c9 3778 DMA_FROM_DEVICE);
5965dc74
HG
3779 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3780 dev_err(ctx->dev, "unable to map ctx\n");
3781 state->ctx_dma = 0;
3f16f6c9
HG
3782 goto unmap;
3783 }
3784
3785 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3786 dpaa2_fl_set_final(in_fle, true);
07586d3d
HG
3787 /*
3788 * The crypto engine requires the input entry to be present when a
3789 * "frame list" FD is used.
3790 * Since the engine does not support FMT=2'b11 (unused entry type),
3791 * leaving in_fle zeroized (except for the "Final" flag) is the best option.
3792 */
3793 if (buflen) {
3794 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3795 dpaa2_fl_set_addr(in_fle, state->buf_dma);
3796 dpaa2_fl_set_len(in_fle, buflen);
3797 }
3f16f6c9 3798 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5965dc74 3799 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3f16f6c9
HG
3800 dpaa2_fl_set_len(out_fle, digestsize);
3801
3802 req_ctx->flc = &ctx->flc[DIGEST];
3803 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3804 req_ctx->cbk = ahash_done;
3805 req_ctx->ctx = &req->base;
3806 req_ctx->edesc = edesc;
3807
3808 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3809 if (ret == -EINPROGRESS ||
3810 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3811 return ret;
3812
3813unmap:
5965dc74 3814 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3f16f6c9
HG
3815 qi_cache_free(edesc);
3816 return ret;
3817}
3818
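/*
 * Update step before any running context exists. Data is staged in
 * the state buffer until at least one full block is available; the
 * first real hash operation then uses the UPDATE_FIRST flow context
 * and rebinds the state handlers to the context-carrying variants.
 */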
3819static int ahash_update_no_ctx(struct ahash_request *req)
3820{
3821 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3822 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3823 struct caam_hash_state *state = ahash_request_ctx(req);
3824 struct caam_request *req_ctx = &state->caam_req;
3825 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3826 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3827 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3828 GFP_KERNEL : GFP_ATOMIC;
3829 u8 *buf = current_buf(state);
3830 int *buflen = current_buflen(state);
3831 u8 *next_buf = alt_buf(state);
3832 int *next_buflen = alt_buflen(state);
3833 int in_len = *buflen + req->nbytes, to_hash;
3834 int qm_sg_bytes, src_nents, mapped_nents;
3835 struct ahash_edesc *edesc;
3836 int ret = 0;
3837
3838 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3839 to_hash = in_len - *next_buflen;
3840
3841 if (to_hash) {
3842 struct dpaa2_sg_entry *sg_table;
3843
3844 src_nents = sg_nents_for_len(req->src,
3845 req->nbytes - *next_buflen);
3846 if (src_nents < 0) {
3847 dev_err(ctx->dev, "Invalid number of src SG.\n");
3848 return src_nents;
3849 }
3850
3851 if (src_nents) {
3852 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3853 DMA_TO_DEVICE);
3854 if (!mapped_nents) {
3855 dev_err(ctx->dev, "unable to DMA map source\n");
3856 return -ENOMEM;
3857 }
3858 } else {
3859 mapped_nents = 0;
3860 }
3861
3862 /* allocate space for base edesc and link tables */
3863 edesc = qi_cache_zalloc(GFP_DMA | flags);
3864 if (!edesc) {
3865 dma_unmap_sg(ctx->dev, req->src, src_nents,
3866 DMA_TO_DEVICE);
3867 return -ENOMEM;
3868 }
3869
3870 edesc->src_nents = src_nents;
3871 qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
3872 sg_table = &edesc->sgt[0];
3873
3874 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3875 if (ret)
3876 goto unmap_ctx;
3877
3878 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3879
3880 if (*next_buflen)
3881 scatterwalk_map_and_copy(next_buf, req->src,
3882 to_hash - *buflen,
3883 *next_buflen, 0);
3884
3885 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3886 qm_sg_bytes, DMA_TO_DEVICE);
3887 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3888 dev_err(ctx->dev, "unable to map S/G table\n");
3889 ret = -ENOMEM;
3890 goto unmap_ctx;
3891 }
3892 edesc->qm_sg_bytes = qm_sg_bytes;
3893
5965dc74 3894 state->ctx_dma_len = ctx->ctx_len;
3f16f6c9
HG
3895 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3896 ctx->ctx_len, DMA_FROM_DEVICE);
3897 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3898 dev_err(ctx->dev, "unable to map ctx\n");
3899 state->ctx_dma = 0;
3900 ret = -ENOMEM;
3901 goto unmap_ctx;
3902 }
3903
3904 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3905 dpaa2_fl_set_final(in_fle, true);
3906 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3907 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3908 dpaa2_fl_set_len(in_fle, to_hash);
3909 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3910 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3911 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3912
3913 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
3914 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
3915 req_ctx->cbk = ahash_done_ctx_dst;
3916 req_ctx->ctx = &req->base;
3917 req_ctx->edesc = edesc;
3918
3919 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3920 if (ret != -EINPROGRESS &&
3921 !(ret == -EBUSY &&
3922 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3923 goto unmap_ctx;
3924
3925 state->update = ahash_update_ctx;
3926 state->finup = ahash_finup_ctx;
3927 state->final = ahash_final_ctx;
3928 } else if (*next_buflen) {
3929 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3930 req->nbytes, 0);
3931 *buflen = *next_buflen;
3932 *next_buflen = 0;
3933 }
3934
3935 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3936 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3937 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3938 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3939 1);
3940
3941 return ret;
3942unmap_ctx:
5965dc74 3943 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
3f16f6c9
HG
3944 qi_cache_free(edesc);
3945 return ret;
3946}
3947
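/*
 * finup without a running context: the buffered bytes and the
 * remaining request data are chained into a single S/G table and
 * processed as one DIGEST operation.
 */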
3948static int ahash_finup_no_ctx(struct ahash_request *req)
3949{
3950 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3951 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3952 struct caam_hash_state *state = ahash_request_ctx(req);
3953 struct caam_request *req_ctx = &state->caam_req;
3954 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3955 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3956 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3957 GFP_KERNEL : GFP_ATOMIC;
3958 int buflen = *current_buflen(state);
3959 int qm_sg_bytes, src_nents, mapped_nents;
3960 int digestsize = crypto_ahash_digestsize(ahash);
3961 struct ahash_edesc *edesc;
3962 struct dpaa2_sg_entry *sg_table;
3963 int ret;
3964
3965 src_nents = sg_nents_for_len(req->src, req->nbytes);
3966 if (src_nents < 0) {
3967 dev_err(ctx->dev, "Invalid number of src SG.\n");
3968 return src_nents;
3969 }
3970
3971 if (src_nents) {
3972 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3973 DMA_TO_DEVICE);
3974 if (!mapped_nents) {
3975 dev_err(ctx->dev, "unable to DMA map source\n");
3976 return -ENOMEM;
3977 }
3978 } else {
3979 mapped_nents = 0;
3980 }
3981
3982 /* allocate space for base edesc and link tables */
3983 edesc = qi_cache_zalloc(GFP_DMA | flags);
3984 if (!edesc) {
3985 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3986 return -ENOMEM;
3987 }
3988
3989 edesc->src_nents = src_nents;
3990 qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
3991 sg_table = &edesc->sgt[0];
3992
3993 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3994 if (ret)
3995 goto unmap;
3996
3997 sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
3998
3999 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4000 DMA_TO_DEVICE);
4001 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4002 dev_err(ctx->dev, "unable to map S/G table\n");
4003 ret = -ENOMEM;
4004 goto unmap;
4005 }
4006 edesc->qm_sg_bytes = qm_sg_bytes;
4007
5965dc74
HG
4008 state->ctx_dma_len = digestsize;
4009 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3f16f6c9 4010 DMA_FROM_DEVICE);
5965dc74
HG
4011 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4012 dev_err(ctx->dev, "unable to map ctx\n");
4013 state->ctx_dma = 0;
3f16f6c9
HG
4014 ret = -ENOMEM;
4015 goto unmap;
4016 }
4017
4018 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4019 dpaa2_fl_set_final(in_fle, true);
4020 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4021 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4022 dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4023 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5965dc74 4024 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3f16f6c9
HG
4025 dpaa2_fl_set_len(out_fle, digestsize);
4026
4027 req_ctx->flc = &ctx->flc[DIGEST];
4028 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4029 req_ctx->cbk = ahash_done;
4030 req_ctx->ctx = &req->base;
4031 req_ctx->edesc = edesc;
4032 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4033 if (ret != -EINPROGRESS &&
4034 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4035 goto unmap;
4036
4037 return ret;
4038unmap:
5965dc74 4039 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3f16f6c9
HG
4040 qi_cache_free(edesc);
4041 	return ret;
4042}
4043
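/*
 * Very first update on a request. Anything short of a full block is
 * held back in the alternate buffer for a later call; otherwise the
 * UPDATE_FIRST flow seeds the running context in state->caam_ctx.
 */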
4044static int ahash_update_first(struct ahash_request *req)
4045{
4046 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4047 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4048 struct caam_hash_state *state = ahash_request_ctx(req);
4049 struct caam_request *req_ctx = &state->caam_req;
4050 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4051 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4052 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4053 GFP_KERNEL : GFP_ATOMIC;
4054 u8 *next_buf = alt_buf(state);
4055 int *next_buflen = alt_buflen(state);
4056 int to_hash;
4057 int src_nents, mapped_nents;
4058 struct ahash_edesc *edesc;
4059 int ret = 0;
4060
4061 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4062 1);
4063 to_hash = req->nbytes - *next_buflen;
4064
4065 if (to_hash) {
4066 struct dpaa2_sg_entry *sg_table;
4067
4068 src_nents = sg_nents_for_len(req->src,
4069 req->nbytes - (*next_buflen));
4070 if (src_nents < 0) {
4071 dev_err(ctx->dev, "Invalid number of src SG.\n");
4072 return src_nents;
4073 }
4074
4075 if (src_nents) {
4076 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4077 DMA_TO_DEVICE);
4078 if (!mapped_nents) {
4079 dev_err(ctx->dev, "unable to map source for DMA\n");
4080 return -ENOMEM;
4081 }
4082 } else {
4083 mapped_nents = 0;
4084 }
4085
4086 /* allocate space for base edesc and link tables */
4087 edesc = qi_cache_zalloc(GFP_DMA | flags);
4088 if (!edesc) {
4089 dma_unmap_sg(ctx->dev, req->src, src_nents,
4090 DMA_TO_DEVICE);
4091 return -ENOMEM;
4092 }
4093
4094 edesc->src_nents = src_nents;
4095 sg_table = &edesc->sgt[0];
4096
4097 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4098 dpaa2_fl_set_final(in_fle, true);
4099 dpaa2_fl_set_len(in_fle, to_hash);
4100
4101 if (mapped_nents > 1) {
4102 int qm_sg_bytes;
4103
4104 sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
4105 qm_sg_bytes = mapped_nents * sizeof(*sg_table);
4106 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4107 qm_sg_bytes,
4108 DMA_TO_DEVICE);
4109 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4110 dev_err(ctx->dev, "unable to map S/G table\n");
4111 ret = -ENOMEM;
4112 goto unmap_ctx;
4113 }
4114 edesc->qm_sg_bytes = qm_sg_bytes;
4115 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4116 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4117 } else {
4118 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4119 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4120 }
4121
4122 if (*next_buflen)
4123 scatterwalk_map_and_copy(next_buf, req->src, to_hash,
4124 *next_buflen, 0);
4125
5965dc74 4126 state->ctx_dma_len = ctx->ctx_len;
3f16f6c9
HG
4127 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4128 ctx->ctx_len, DMA_FROM_DEVICE);
4129 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4130 dev_err(ctx->dev, "unable to map ctx\n");
4131 state->ctx_dma = 0;
4132 ret = -ENOMEM;
4133 goto unmap_ctx;
4134 }
4135
4136 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4137 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4138 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4139
4140 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4141 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4142 req_ctx->cbk = ahash_done_ctx_dst;
4143 req_ctx->ctx = &req->base;
4144 req_ctx->edesc = edesc;
4145
4146 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4147 if (ret != -EINPROGRESS &&
4148 !(ret == -EBUSY && req->base.flags &
4149 CRYPTO_TFM_REQ_MAY_BACKLOG))
4150 goto unmap_ctx;
4151
4152 state->update = ahash_update_ctx;
4153 state->finup = ahash_finup_ctx;
4154 state->final = ahash_final_ctx;
4155 } else if (*next_buflen) {
4156 state->update = ahash_update_no_ctx;
4157 state->finup = ahash_finup_no_ctx;
4158 state->final = ahash_final_no_ctx;
4159 scatterwalk_map_and_copy(next_buf, req->src, 0,
4160 req->nbytes, 0);
4161 switch_buf(state);
4162 }
4163
4164 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
4165 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
4166 1);
4167
4168 return ret;
4169unmap_ctx:
5965dc74 4170 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
3f16f6c9
HG
4171 qi_cache_free(edesc);
4172 return ret;
4173}
4174
4175static int ahash_finup_first(struct ahash_request *req)
4176{
4177 return ahash_digest(req);
4178}
4179
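/*
 * Per-request state machine: update/finup/final start out pointing at
 * the "first"/"no_ctx" handlers and are rebound to the context-based
 * ones once the engine holds a running digest. The ahash entry points
 * below simply dispatch through these pointers.
 */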
4180static int ahash_init(struct ahash_request *req)
4181{
4182 struct caam_hash_state *state = ahash_request_ctx(req);
4183
4184 state->update = ahash_update_first;
4185 state->finup = ahash_finup_first;
4186 state->final = ahash_final_no_ctx;
4187
4188 state->ctx_dma = 0;
5965dc74 4189 state->ctx_dma_len = 0;
3f16f6c9
HG
4190 state->current_buf = 0;
4191 state->buf_dma = 0;
4192 state->buflen_0 = 0;
4193 state->buflen_1 = 0;
4194
4195 return 0;
4196}
4197
4198static int ahash_update(struct ahash_request *req)
4199{
4200 struct caam_hash_state *state = ahash_request_ctx(req);
4201
4202 return state->update(req);
4203}
4204
4205static int ahash_finup(struct ahash_request *req)
4206{
4207 struct caam_hash_state *state = ahash_request_ctx(req);
4208
4209 return state->finup(req);
4210}
4211
4212static int ahash_final(struct ahash_request *req)
4213{
4214 struct caam_hash_state *state = ahash_request_ctx(req);
4215
4216 return state->final(req);
4217}
4218
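/*
 * export/import serialize only the software-visible part of the hash
 * state: the active data buffer, the caam_ctx snapshot and the three
 * dispatch pointers. DMA state is not exported; it is re-established
 * by the next operation.
 */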
4219static int ahash_export(struct ahash_request *req, void *out)
4220{
4221 struct caam_hash_state *state = ahash_request_ctx(req);
4222 struct caam_export_state *export = out;
4223 int len;
4224 u8 *buf;
4225
4226 if (state->current_buf) {
4227 buf = state->buf_1;
4228 len = state->buflen_1;
4229 } else {
4230 buf = state->buf_0;
4231 len = state->buflen_0;
4232 }
4233
4234 memcpy(export->buf, buf, len);
4235 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4236 export->buflen = len;
4237 export->update = state->update;
4238 export->final = state->final;
4239 export->finup = state->finup;
4240
4241 return 0;
4242}
4243
4244static int ahash_import(struct ahash_request *req, const void *in)
4245{
4246 struct caam_hash_state *state = ahash_request_ctx(req);
4247 const struct caam_export_state *export = in;
4248
4249 memset(state, 0, sizeof(*state));
4250 memcpy(state->buf_0, export->buf, export->buflen);
4251 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4252 state->buflen_0 = export->buflen;
4253 state->update = export->update;
4254 state->final = export->final;
4255 state->finup = export->finup;
4256
4257 return 0;
4258}
4259
4260struct caam_hash_template {
4261 char name[CRYPTO_MAX_ALG_NAME];
4262 char driver_name[CRYPTO_MAX_ALG_NAME];
4263 char hmac_name[CRYPTO_MAX_ALG_NAME];
4264 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4265 unsigned int blocksize;
4266 struct ahash_alg template_ahash;
4267 u32 alg_type;
4268};
4269
4270/* ahash descriptors */
4271static struct caam_hash_template driver_hash[] = {
4272 {
4273 .name = "sha1",
4274 .driver_name = "sha1-caam-qi2",
4275 .hmac_name = "hmac(sha1)",
4276 .hmac_driver_name = "hmac-sha1-caam-qi2",
4277 .blocksize = SHA1_BLOCK_SIZE,
4278 .template_ahash = {
4279 .init = ahash_init,
4280 .update = ahash_update,
4281 .final = ahash_final,
4282 .finup = ahash_finup,
4283 .digest = ahash_digest,
4284 .export = ahash_export,
4285 .import = ahash_import,
4286 .setkey = ahash_setkey,
4287 .halg = {
4288 .digestsize = SHA1_DIGEST_SIZE,
4289 .statesize = sizeof(struct caam_export_state),
4290 },
4291 },
4292 .alg_type = OP_ALG_ALGSEL_SHA1,
4293 }, {
4294 .name = "sha224",
4295 .driver_name = "sha224-caam-qi2",
4296 .hmac_name = "hmac(sha224)",
4297 .hmac_driver_name = "hmac-sha224-caam-qi2",
4298 .blocksize = SHA224_BLOCK_SIZE,
4299 .template_ahash = {
4300 .init = ahash_init,
4301 .update = ahash_update,
4302 .final = ahash_final,
4303 .finup = ahash_finup,
4304 .digest = ahash_digest,
4305 .export = ahash_export,
4306 .import = ahash_import,
4307 .setkey = ahash_setkey,
4308 .halg = {
4309 .digestsize = SHA224_DIGEST_SIZE,
4310 .statesize = sizeof(struct caam_export_state),
4311 },
4312 },
4313 .alg_type = OP_ALG_ALGSEL_SHA224,
4314 }, {
4315 .name = "sha256",
4316 .driver_name = "sha256-caam-qi2",
4317 .hmac_name = "hmac(sha256)",
4318 .hmac_driver_name = "hmac-sha256-caam-qi2",
4319 .blocksize = SHA256_BLOCK_SIZE,
4320 .template_ahash = {
4321 .init = ahash_init,
4322 .update = ahash_update,
4323 .final = ahash_final,
4324 .finup = ahash_finup,
4325 .digest = ahash_digest,
4326 .export = ahash_export,
4327 .import = ahash_import,
4328 .setkey = ahash_setkey,
4329 .halg = {
4330 .digestsize = SHA256_DIGEST_SIZE,
4331 .statesize = sizeof(struct caam_export_state),
4332 },
4333 },
4334 .alg_type = OP_ALG_ALGSEL_SHA256,
4335 }, {
4336 .name = "sha384",
4337 .driver_name = "sha384-caam-qi2",
4338 .hmac_name = "hmac(sha384)",
4339 .hmac_driver_name = "hmac-sha384-caam-qi2",
4340 .blocksize = SHA384_BLOCK_SIZE,
4341 .template_ahash = {
4342 .init = ahash_init,
4343 .update = ahash_update,
4344 .final = ahash_final,
4345 .finup = ahash_finup,
4346 .digest = ahash_digest,
4347 .export = ahash_export,
4348 .import = ahash_import,
4349 .setkey = ahash_setkey,
4350 .halg = {
4351 .digestsize = SHA384_DIGEST_SIZE,
4352 .statesize = sizeof(struct caam_export_state),
4353 },
4354 },
4355 .alg_type = OP_ALG_ALGSEL_SHA384,
4356 }, {
4357 .name = "sha512",
4358 .driver_name = "sha512-caam-qi2",
4359 .hmac_name = "hmac(sha512)",
4360 .hmac_driver_name = "hmac-sha512-caam-qi2",
4361 .blocksize = SHA512_BLOCK_SIZE,
4362 .template_ahash = {
4363 .init = ahash_init,
4364 .update = ahash_update,
4365 .final = ahash_final,
4366 .finup = ahash_finup,
4367 .digest = ahash_digest,
4368 .export = ahash_export,
4369 .import = ahash_import,
4370 .setkey = ahash_setkey,
4371 .halg = {
4372 .digestsize = SHA512_DIGEST_SIZE,
4373 .statesize = sizeof(struct caam_export_state),
4374 },
4375 },
4376 .alg_type = OP_ALG_ALGSEL_SHA512,
4377 }, {
4378 .name = "md5",
4379 .driver_name = "md5-caam-qi2",
4380 .hmac_name = "hmac(md5)",
4381 .hmac_driver_name = "hmac-md5-caam-qi2",
4382 .blocksize = MD5_BLOCK_WORDS * 4,
4383 .template_ahash = {
4384 .init = ahash_init,
4385 .update = ahash_update,
4386 .final = ahash_final,
4387 .finup = ahash_finup,
4388 .digest = ahash_digest,
4389 .export = ahash_export,
4390 .import = ahash_import,
4391 .setkey = ahash_setkey,
4392 .halg = {
4393 .digestsize = MD5_DIGEST_SIZE,
4394 .statesize = sizeof(struct caam_export_state),
4395 },
4396 },
4397 .alg_type = OP_ALG_ALGSEL_MD5,
4398 }
4399};
4400
4401struct caam_hash_alg {
4402 struct list_head entry;
4403 struct device *dev;
4404 int alg_type;
4405 struct ahash_alg ahash_alg;
4406};
4407
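/*
 * Per-transform init: all HASH_NUM_OP flow contexts are covered by a
 * single DMA mapping and addressed at fixed offsets from it; the
 * running-context length is looked up from the algorithm selector.
 */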
4408static int caam_hash_cra_init(struct crypto_tfm *tfm)
4409{
4410 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4411 struct crypto_alg *base = tfm->__crt_alg;
4412 struct hash_alg_common *halg =
4413 container_of(base, struct hash_alg_common, base);
4414 struct ahash_alg *alg =
4415 container_of(halg, struct ahash_alg, halg);
4416 struct caam_hash_alg *caam_hash =
4417 container_of(alg, struct caam_hash_alg, ahash_alg);
4418 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4419 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4420 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4421 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4422 HASH_MSG_LEN + 32,
4423 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4424 HASH_MSG_LEN + 64,
4425 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
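	/*
	 * The 32- and 64-byte entries above correspond to SHA-224 and
	 * SHA-384, which carry the full SHA-256/SHA-512 running state;
	 * truncation is applied only to the final digest.
	 */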
4426 dma_addr_t dma_addr;
4427 int i;
4428
4429 ctx->dev = caam_hash->dev;
4430
4431 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4432 DMA_BIDIRECTIONAL,
4433 DMA_ATTR_SKIP_CPU_SYNC);
4434 if (dma_mapping_error(ctx->dev, dma_addr)) {
4435 dev_err(ctx->dev, "unable to map shared descriptors\n");
4436 return -ENOMEM;
4437 }
4438
4439 for (i = 0; i < HASH_NUM_OP; i++)
4440 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4441
4442 /* copy descriptor header template value */
4443 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4444
4445 ctx->ctx_len = runninglen[(ctx->adata.algtype &
4446 OP_ALG_ALGSEL_SUBMASK) >>
4447 OP_ALG_ALGSEL_SHIFT];
4448
4449 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4450 sizeof(struct caam_hash_state));
4451
4452 return ahash_set_sh_desc(ahash);
4453}
4454
4455static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4456{
4457 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4458
4459 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4460 DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4461}
4462
4463static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4464 struct caam_hash_template *template, bool keyed)
4465{
4466 struct caam_hash_alg *t_alg;
4467 struct ahash_alg *halg;
4468 struct crypto_alg *alg;
4469
4470 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4471 if (!t_alg)
4472 return ERR_PTR(-ENOMEM);
4473
4474 t_alg->ahash_alg = template->template_ahash;
4475 halg = &t_alg->ahash_alg;
4476 alg = &halg->halg.base;
4477
4478 if (keyed) {
4479 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4480 template->hmac_name);
4481 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4482 template->hmac_driver_name);
4483 } else {
4484 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4485 template->name);
4486 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4487 template->driver_name);
4488 t_alg->ahash_alg.setkey = NULL;
4489 }
4490 alg->cra_module = THIS_MODULE;
4491 alg->cra_init = caam_hash_cra_init;
4492 alg->cra_exit = caam_hash_cra_exit;
4493 alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4494 alg->cra_priority = CAAM_CRA_PRIORITY;
4495 alg->cra_blocksize = template->blocksize;
4496 alg->cra_alignmask = 0;
4497 alg->cra_flags = CRYPTO_ALG_ASYNC;
4498
4499 t_alg->alg_type = template->alg_type;
4500 t_alg->dev = dev;
4501
4502 return t_alg;
4503}
4504
8d818c10
HG
4505static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4506{
4507 struct dpaa2_caam_priv_per_cpu *ppriv;
4508
4509 ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4510 napi_schedule_irqoff(&ppriv->napi);
4511}
4512
4513static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4514{
4515 struct device *dev = priv->dev;
4516 struct dpaa2_io_notification_ctx *nctx;
4517 struct dpaa2_caam_priv_per_cpu *ppriv;
4518 int err, i = 0, cpu;
4519
4520 for_each_online_cpu(cpu) {
4521 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4522 ppriv->priv = priv;
4523 nctx = &ppriv->nctx;
4524 nctx->is_cdan = 0;
4525 nctx->id = ppriv->rsp_fqid;
4526 nctx->desired_cpu = cpu;
4527 nctx->cb = dpaa2_caam_fqdan_cb;
4528
4529 /* Register notification callbacks */
ac5d15b4
HG
4530 ppriv->dpio = dpaa2_io_service_select(cpu);
4531 err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
8d818c10
HG
4532 if (unlikely(err)) {
4533 dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4534 nctx->cb = NULL;
4535 /*
4536 * If there is no affine DPIO for this core, there is probably
4537 * none available for the next cores either. Signal that we want
4538 * to retry later, in case the DPIO devices weren't
4539 * probed yet.
4540 */
4541 err = -EPROBE_DEFER;
4542 goto err;
4543 }
4544
4545 ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4546 dev);
4547 if (unlikely(!ppriv->store)) {
4548 dev_err(dev, "dpaa2_io_store_create() failed\n");
4549 err = -ENOMEM;
4550 goto err;
4551 }
4552
4553 if (++i == priv->num_pairs)
4554 break;
4555 }
4556
4557 return 0;
4558
4559err:
4560 for_each_online_cpu(cpu) {
4561 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4562 if (!ppriv->nctx.cb)
4563 break;
ac5d15b4 4564 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
8d818c10
HG
4565 }
4566
4567 for_each_online_cpu(cpu) {
4568 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4569 if (!ppriv->store)
4570 break;
4571 dpaa2_io_store_destroy(ppriv->store);
4572 }
4573
4574 return err;
4575}
4576
4577static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4578{
4579 struct dpaa2_caam_priv_per_cpu *ppriv;
4580 int i = 0, cpu;
4581
4582 for_each_online_cpu(cpu) {
4583 ppriv = per_cpu_ptr(priv->ppriv, cpu);
ac5d15b4
HG
4584 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4585 priv->dev);
8d818c10
HG
4586 dpaa2_io_store_destroy(ppriv->store);
4587
4588 if (++i == priv->num_pairs)
4589 return;
4590 }
4591}
4592
4593static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4594{
4595 struct dpseci_rx_queue_cfg rx_queue_cfg;
4596 struct device *dev = priv->dev;
4597 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4598 struct dpaa2_caam_priv_per_cpu *ppriv;
4599 int err = 0, i = 0, cpu;
4600
4601 /* Configure Rx queues */
4602 for_each_online_cpu(cpu) {
4603 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4604
4605 rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4606 DPSECI_QUEUE_OPT_USER_CTX;
4607 rx_queue_cfg.order_preservation_en = 0;
4608 rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4609 rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4610 /*
4611 * Rx priority (WQ) doesn't really matter, since we use
4612 * pull mode, i.e. volatile dequeues from specific FQs
4613 */
4614 rx_queue_cfg.dest_cfg.priority = 0;
4615 rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4616
4617 err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4618 &rx_queue_cfg);
4619 if (err) {
4620 dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4621 err);
4622 return err;
4623 }
4624
4625 if (++i == priv->num_pairs)
4626 break;
4627 }
4628
4629 return err;
4630}
4631
4632static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4633{
4634 struct device *dev = priv->dev;
4635
4636 if (!priv->cscn_mem)
4637 return;
4638
4639 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4640 kfree(priv->cscn_mem);
4641}
4642
4643static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4644{
4645 struct device *dev = priv->dev;
4646 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4647
4648 dpaa2_dpseci_congestion_free(priv);
4649 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4650}
4651
4652static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4653 const struct dpaa2_fd *fd)
4654{
4655 struct caam_request *req;
4656 u32 fd_err;
4657
4658 if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4659 dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4660 return;
4661 }
4662
4663 fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4664 if (unlikely(fd_err))
4665 dev_err(priv->dev, "FD error: %08x\n", fd_err);
4666
4667 /*
4668 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4669 * in FD[ERR] or FD[FRC].
4670 */
4671 req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4672 dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4673 DMA_BIDIRECTIONAL);
4674 req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4675}
4676
4677static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4678{
4679 int err;
4680
4681 /* Retry while portal is busy */
4682 do {
ac5d15b4 4683 err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
8d818c10
HG
4684 ppriv->store);
4685 } while (err == -EBUSY);
4686
4687 if (unlikely(err))
4688 dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
4689
4690 return err;
4691}
4692
4693static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4694{
4695 struct dpaa2_dq *dq;
4696 int cleaned = 0, is_last;
4697
4698 do {
4699 dq = dpaa2_io_store_next(ppriv->store, &is_last);
4700 if (unlikely(!dq)) {
4701 if (unlikely(!is_last)) {
4702 dev_dbg(ppriv->priv->dev,
4703 "FQ %d returned no valid frames\n",
4704 ppriv->rsp_fqid);
4705 /*
4706 * MUST retry until we get some sort of
4707 * valid response token (be it "empty dequeue"
4708 * or a valid frame).
4709 */
4710 continue;
4711 }
4712 break;
4713 }
4714
4715 /* Process FD */
4716 dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4717 cleaned++;
4718 } while (!is_last);
4719
4720 return cleaned;
4721}
4722
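/*
 * NAPI poll: frames are pulled into the per-CPU store and drained
 * until either the FQ runs dry or one more full store could overshoot
 * the budget (the DPAA2_CAAM_STORE_SIZE guard below). Notifications
 * are rearmed only once processing completes under budget.
 */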
4723static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4724{
4725 struct dpaa2_caam_priv_per_cpu *ppriv;
4726 struct dpaa2_caam_priv *priv;
4727 int err, cleaned = 0, store_cleaned;
4728
4729 ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4730 priv = ppriv->priv;
4731
4732 if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4733 return 0;
4734
4735 do {
4736 store_cleaned = dpaa2_caam_store_consume(ppriv);
4737 cleaned += store_cleaned;
4738
4739 if (store_cleaned == 0 ||
4740 cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4741 break;
4742
4743 /* Try to dequeue some more */
4744 err = dpaa2_caam_pull_fq(ppriv);
4745 if (unlikely(err))
4746 break;
4747 } while (1);
4748
4749 if (cleaned < budget) {
4750 napi_complete_done(napi, cleaned);
ac5d15b4 4751 err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
8d818c10
HG
4752 if (unlikely(err))
4753 dev_err(priv->dev, "Notification rearm failed: %d\n",
4754 err);
4755 }
4756
4757 return cleaned;
4758}
4759
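/*
 * Congestion state change notifications (CSCN) are written on group
 * entry/exit into the DMA-able scratch area set up below (see the
 * WRITE_MEM_ON_ENTER/EXIT modes); dpaa2_caam_enqueue() polls that
 * area and drops requests while the group is congested.
 */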
4760static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4761 u16 token)
4762{
4763 struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4764 struct device *dev = priv->dev;
4765 int err;
4766
4767 /*
4768 * Congestion group feature supported starting with DPSECI API v5.1
4769 * and only when object has been created with this capability.
4770 */
4771 if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4772 !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4773 return 0;
4774
4775 priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4776 GFP_KERNEL | GFP_DMA);
4777 if (!priv->cscn_mem)
4778 return -ENOMEM;
4779
4780 priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4781 priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4782 DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4783 if (dma_mapping_error(dev, priv->cscn_dma)) {
4784 dev_err(dev, "Error mapping CSCN memory area\n");
4785 err = -ENOMEM;
4786 goto err_dma_map;
4787 }
4788
4789 cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4790 cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4791 cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4792 cong_notif_cfg.message_ctx = (uintptr_t)priv;
4793 cong_notif_cfg.message_iova = priv->cscn_dma;
4794 cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4795 DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4796 DPSECI_CGN_MODE_COHERENT_WRITE;
4797
4798 err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4799 &cong_notif_cfg);
4800 if (err) {
4801 dev_err(dev, "dpseci_set_congestion_notification failed\n");
4802 goto err_set_cong;
4803 }
4804
4805 return 0;
4806
4807err_set_cong:
4808 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4809err_dma_map:
4810 kfree(priv->cscn_mem);
4811
4812 return err;
4813}
4814
4815static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4816{
4817 struct device *dev = &ls_dev->dev;
4818 struct dpaa2_caam_priv *priv;
4819 struct dpaa2_caam_priv_per_cpu *ppriv;
4820 int err, cpu;
4821 u8 i;
4822
4823 priv = dev_get_drvdata(dev);
4824
4825 priv->dev = dev;
4826 priv->dpsec_id = ls_dev->obj_desc.id;
4827
4828 /* Get a handle for the DPSECI this interface is associated with */
4829 err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4830 if (err) {
4831 dev_err(dev, "dpseci_open() failed: %d\n", err);
4832 goto err_open;
4833 }
4834
4835 err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4836 &priv->minor_ver);
4837 if (err) {
4838 dev_err(dev, "dpseci_get_api_version() failed\n");
4839 goto err_get_vers;
4840 }
4841
4842 dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
4843
4844 err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
4845 &priv->dpseci_attr);
4846 if (err) {
4847 dev_err(dev, "dpseci_get_attributes() failed\n");
4848 goto err_get_vers;
4849 }
4850
4851 err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
4852 &priv->sec_attr);
4853 if (err) {
4854 dev_err(dev, "dpseci_get_sec_attr() failed\n");
4855 goto err_get_vers;
4856 }
4857
4858 err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
4859 if (err) {
4860 dev_err(dev, "setup_congestion() failed\n");
4861 goto err_get_vers;
4862 }
4863
4864 priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
4865 priv->dpseci_attr.num_tx_queues);
4866 if (priv->num_pairs > num_online_cpus()) {
4867 dev_warn(dev, "%d queues won't be used\n",
4868 priv->num_pairs - num_online_cpus());
4869 priv->num_pairs = num_online_cpus();
4870 }
4871
4872 for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
4873 err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4874 &priv->rx_queue_attr[i]);
4875 if (err) {
4876 dev_err(dev, "dpseci_get_rx_queue() failed\n");
4877 goto err_get_rx_queue;
4878 }
4879 }
4880
4881 for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
4882 err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4883 &priv->tx_queue_attr[i]);
4884 if (err) {
4885 dev_err(dev, "dpseci_get_tx_queue() failed\n");
4886 goto err_get_rx_queue;
4887 }
4888 }
4889
4890 i = 0;
4891 for_each_online_cpu(cpu) {
ac5d15b4
HG
4892 u8 j;
4893
4894 j = i % priv->num_pairs;
8d818c10
HG
4895
4896 ppriv = per_cpu_ptr(priv->ppriv, cpu);
ac5d15b4
HG
4897 ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
4898
4899 /*
4900 * Allow all cores to enqueue, while only some of them
4901 * will take part in dequeuing.
4902 */
4903 if (++i > priv->num_pairs)
4904 continue;
4905
4906 ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
4907 ppriv->prio = j;
4908
4909 dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
4910 priv->rx_queue_attr[j].fqid,
4911 priv->tx_queue_attr[j].fqid);
8d818c10
HG
4912
4913 ppriv->net_dev.dev = *dev;
4914 INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
4915 netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
4916 DPAA2_CAAM_NAPI_WEIGHT);
8d818c10
HG
4917 }
4918
4919 return 0;
4920
4921err_get_rx_queue:
4922 dpaa2_dpseci_congestion_free(priv);
4923err_get_vers:
4924 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4925err_open:
4926 return err;
4927}
4928
4929static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
4930{
4931 struct device *dev = priv->dev;
4932 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4933 struct dpaa2_caam_priv_per_cpu *ppriv;
4934 int i;
4935
4936 for (i = 0; i < priv->num_pairs; i++) {
4937 ppriv = per_cpu_ptr(priv->ppriv, i);
4938 napi_enable(&ppriv->napi);
4939 }
4940
4941 return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
4942}
4943
4944static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
4945{
4946 struct device *dev = priv->dev;
4947 struct dpaa2_caam_priv_per_cpu *ppriv;
4948 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4949 int i, err = 0, enabled;
4950
4951 err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
4952 if (err) {
4953 dev_err(dev, "dpseci_disable() failed\n");
4954 return err;
4955 }
4956
4957 err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
4958 if (err) {
4959 dev_err(dev, "dpseci_is_enabled() failed\n");
4960 return err;
4961 }
4962
4963 dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
4964
4965 for (i = 0; i < priv->num_pairs; i++) {
4966 ppriv = per_cpu_ptr(priv->ppriv, i);
4967 napi_disable(&ppriv->napi);
4968 netif_napi_del(&ppriv->napi);
4969 }
4970
4971 return 0;
4972}
4973
3f16f6c9
HG
4974static struct list_head hash_list;
4975
8d818c10
HG
4976static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
4977{
4978 struct device *dev;
4979 struct dpaa2_caam_priv *priv;
4980 int i, err = 0;
4981 bool registered = false;
4982
4983 /*
4984 * There is no way to get CAAM endianness - there is no direct register
4985 * space access and MC f/w does not provide this attribute.
4986 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
4987 * property.
4988 */
4989 caam_little_end = true;
4990
4991 caam_imx = false;
4992
4993 dev = &dpseci_dev->dev;
4994
4995 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
4996 if (!priv)
4997 return -ENOMEM;
4998
4999 dev_set_drvdata(dev, priv);
5000
5001 priv->domain = iommu_get_domain_for_dev(dev);
5002
5003 qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5004 0, SLAB_CACHE_DMA, NULL);
5005 if (!qi_cache) {
5006 dev_err(dev, "Can't allocate SEC cache\n");
5007 return -ENOMEM;
5008 }
5009
5010 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
5011 if (err) {
5012 dev_err(dev, "dma_set_mask_and_coherent() failed\n");
5013 goto err_dma_mask;
5014 }
5015
5016 /* Obtain a MC portal */
5017 err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
5018 if (err) {
5019 if (err == -ENXIO)
5020 err = -EPROBE_DEFER;
5021 else
5022 dev_err(dev, "MC portal allocation failed\n");
5023
5024 goto err_dma_mask;
5025 }
5026
5027 priv->ppriv = alloc_percpu(*priv->ppriv);
5028 if (!priv->ppriv) {
5029 dev_err(dev, "alloc_percpu() failed\n");
5030 err = -ENOMEM;
5031 goto err_alloc_ppriv;
5032 }
5033
5034 /* DPSECI initialization */
5035 err = dpaa2_dpseci_setup(dpseci_dev);
5036 if (err) {
5037 dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5038 goto err_dpseci_setup;
5039 }
5040
5041 /* DPIO */
5042 err = dpaa2_dpseci_dpio_setup(priv);
5043 if (err) {
5044 if (err != -EPROBE_DEFER)
5045 dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
5046 goto err_dpio_setup;
5047 }
5048
5049 /* DPSECI binding to DPIO */
5050 err = dpaa2_dpseci_bind(priv);
5051 if (err) {
5052 dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5053 goto err_bind;
5054 }
5055
5056 /* DPSECI enable */
5057 err = dpaa2_dpseci_enable(priv);
5058 if (err) {
5059 dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5060 goto err_bind;
5061 }
5062
5063 /* register crypto algorithms the device supports */
226853ac
HG
5064 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5065 struct caam_skcipher_alg *t_alg = driver_algs + i;
5066 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5067
5068 /* Skip DES algorithms if not supported by device */
5069 if (!priv->sec_attr.des_acc_num &&
5070 (alg_sel == OP_ALG_ALGSEL_3DES ||
5071 alg_sel == OP_ALG_ALGSEL_DES))
5072 continue;
5073
5074 /* Skip AES algorithms if not supported by device */
5075 if (!priv->sec_attr.aes_acc_num &&
5076 alg_sel == OP_ALG_ALGSEL_AES)
5077 continue;
5078
c99d4a24
HG
5079 /* Skip CHACHA20 algorithms if not supported by device */
5080 if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5081 !priv->sec_attr.ccha_acc_num)
5082 continue;
5083
226853ac
HG
5084 t_alg->caam.dev = dev;
5085 caam_skcipher_alg_init(t_alg);
5086
5087 err = crypto_register_skcipher(&t_alg->skcipher);
5088 if (err) {
5089 dev_warn(dev, "%s alg registration failed: %d\n",
5090 t_alg->skcipher.base.cra_driver_name, err);
5091 continue;
5092 }
5093
5094 t_alg->registered = true;
5095 registered = true;
5096 }
5097
8d818c10
HG
5098 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5099 struct caam_aead_alg *t_alg = driver_aeads + i;
5100 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5101 OP_ALG_ALGSEL_MASK;
5102 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5103 OP_ALG_ALGSEL_MASK;
5104
5105 /* Skip DES algorithms if not supported by device */
5106 if (!priv->sec_attr.des_acc_num &&
5107 (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5108 c1_alg_sel == OP_ALG_ALGSEL_DES))
5109 continue;
5110
5111 /* Skip AES algorithms if not supported by device */
5112 if (!priv->sec_attr.aes_acc_num &&
5113 c1_alg_sel == OP_ALG_ALGSEL_AES)
5114 continue;
5115
c10a5336
HG
5116 /* Skip CHACHA20 algorithms if not supported by device */
5117 if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5118 !priv->sec_attr.ccha_acc_num)
5119 continue;
5120
5121 /* Skip POLY1305 algorithms if not supported by device */
5122 if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5123 !priv->sec_attr.ptha_acc_num)
5124 continue;
5125
8d818c10
HG
5126 /*
5127 * Skip algorithms requiring message digests
5128 * if MD not supported by device.
5129 */
c10a5336
HG
5130 if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5131 !priv->sec_attr.md_acc_num)
8d818c10
HG
5132 continue;
5133
5134 t_alg->caam.dev = dev;
5135 caam_aead_alg_init(t_alg);
5136
5137 err = crypto_register_aead(&t_alg->aead);
5138 if (err) {
5139 dev_warn(dev, "%s alg registration failed: %d\n",
5140 t_alg->aead.base.cra_driver_name, err);
5141 continue;
5142 }
5143
5144 t_alg->registered = true;
5145 registered = true;
5146 }
5147 if (registered)
5148 dev_info(dev, "algorithms registered in /proc/crypto\n");
5149
3f16f6c9
HG
5150 /* register hash algorithms the device supports */
5151 INIT_LIST_HEAD(&hash_list);
5152
5153 /*
5154 * Skip registration of any hashing algorithms if MD block
5155 * is not present.
5156 */
5157 if (!priv->sec_attr.md_acc_num)
5158 return 0;
5159
5160 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5161 struct caam_hash_alg *t_alg;
5162 struct caam_hash_template *alg = driver_hash + i;
5163
5164 /* register hmac version */
5165 t_alg = caam_hash_alloc(dev, alg, true);
5166 if (IS_ERR(t_alg)) {
5167 err = PTR_ERR(t_alg);
5168 dev_warn(dev, "%s hash alg allocation failed: %d\n",
5169 alg->driver_name, err);
5170 continue;
5171 }
5172
5173 err = crypto_register_ahash(&t_alg->ahash_alg);
5174 if (err) {
5175 dev_warn(dev, "%s alg registration failed: %d\n",
5176 t_alg->ahash_alg.halg.base.cra_driver_name,
5177 err);
5178 kfree(t_alg);
5179 } else {
5180 list_add_tail(&t_alg->entry, &hash_list);
5181 }
5182
5183 /* register unkeyed version */
5184 t_alg = caam_hash_alloc(dev, alg, false);
5185 if (IS_ERR(t_alg)) {
5186 err = PTR_ERR(t_alg);
5187 dev_warn(dev, "%s alg allocation failed: %d\n",
5188 alg->driver_name, err);
5189 continue;
5190 }
5191
5192 err = crypto_register_ahash(&t_alg->ahash_alg);
5193 if (err) {
5194 dev_warn(dev, "%s alg registration failed: %d\n",
5195 t_alg->ahash_alg.halg.base.cra_driver_name,
5196 err);
5197 kfree(t_alg);
5198 } else {
5199 list_add_tail(&t_alg->entry, &hash_list);
5200 }
5201 }
5202 if (!list_empty(&hash_list))
5203 dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5204
8d818c10
HG
5205 return err;
5206
5207err_bind:
5208 dpaa2_dpseci_dpio_free(priv);
5209err_dpio_setup:
5210 dpaa2_dpseci_free(priv);
5211err_dpseci_setup:
5212 free_percpu(priv->ppriv);
5213err_alloc_ppriv:
5214 fsl_mc_portal_free(priv->mc_io);
5215err_dma_mask:
5216 kmem_cache_destroy(qi_cache);
5217
5218 return err;
5219}
5220
5221static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5222{
5223 struct device *dev;
5224 struct dpaa2_caam_priv *priv;
5225 int i;
5226
5227 dev = &ls_dev->dev;
5228 priv = dev_get_drvdata(dev);
5229
5230 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5231 struct caam_aead_alg *t_alg = driver_aeads + i;
5232
5233 if (t_alg->registered)
5234 crypto_unregister_aead(&t_alg->aead);
5235 }
5236
226853ac
HG
5237 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5238 struct caam_skcipher_alg *t_alg = driver_algs + i;
5239
5240 if (t_alg->registered)
5241 crypto_unregister_skcipher(&t_alg->skcipher);
5242 }
5243
3f16f6c9
HG
5244 if (hash_list.next) {
5245 struct caam_hash_alg *t_hash_alg, *p;
5246
5247 list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5248 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5249 list_del(&t_hash_alg->entry);
5250 kfree(t_hash_alg);
5251 }
5252 }
5253
8d818c10
HG
5254 dpaa2_dpseci_disable(priv);
5255 dpaa2_dpseci_dpio_free(priv);
5256 dpaa2_dpseci_free(priv);
5257 free_percpu(priv->ppriv);
5258 fsl_mc_portal_free(priv->mc_io);
5259 kmem_cache_destroy(qi_cache);
5260
5261 return 0;
5262}
5263
5264int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5265{
5266 struct dpaa2_fd fd;
5267 struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
ac5d15b4
HG
5268 struct dpaa2_caam_priv_per_cpu *ppriv;
5269 int err = 0, i;
8d818c10
HG
5270
5271 if (IS_ERR(req))
5272 return PTR_ERR(req);
5273
5274 if (priv->cscn_mem) {
5275 dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5276 DPAA2_CSCN_SIZE,
5277 DMA_FROM_DEVICE);
5278 if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5279 dev_dbg_ratelimited(dev, "Dropping request\n");
5280 return -EBUSY;
5281 }
5282 }
5283
5284 dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5285
5286 req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5287 DMA_BIDIRECTIONAL);
5288 if (dma_mapping_error(dev, req->fd_flt_dma)) {
5289 dev_err(dev, "DMA mapping error for QI enqueue request\n");
5290 return -EIO;
5291 }
5292
5293 memset(&fd, 0, sizeof(fd));
5294 dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5295 dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5296 dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5297 dpaa2_fd_set_flc(&fd, req->flc_dma);
5298
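	/*
	 * Bounded busy-retry: attempt the enqueue up to twice the number
	 * of Tx queues while the portal reports -EBUSY, relaxing the CPU
	 * between attempts, before giving up on the frame.
	 */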
ac5d15b4 5299 ppriv = this_cpu_ptr(priv->ppriv);
8d818c10 5300 for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
ac5d15b4 5301 err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
8d818c10
HG
5302 &fd);
5303 if (err != -EBUSY)
5304 break;
05bd1bee
HG
5305
5306 cpu_relax();
8d818c10 5307 }
8d818c10
HG
5308
5309 if (unlikely(err)) {
f1657eb9 5310 dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
8d818c10
HG
5311 goto err_out;
5312 }
5313
5314 return -EINPROGRESS;
5315
5316err_out:
5317 dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5318 DMA_BIDIRECTIONAL);
5319 return -EIO;
5320}
5321EXPORT_SYMBOL(dpaa2_caam_enqueue);
5322
5323static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5324 {
5325 .vendor = FSL_MC_VENDOR_FREESCALE,
5326 .obj_type = "dpseci",
5327 },
5328 { .vendor = 0x0 }
5329};
5330
5331static struct fsl_mc_driver dpaa2_caam_driver = {
5332 .driver = {
5333 .name = KBUILD_MODNAME,
5334 .owner = THIS_MODULE,
5335 },
5336 .probe = dpaa2_caam_probe,
5337 .remove = dpaa2_caam_remove,
5338 .match_id_table = dpaa2_caam_match_id_table
5339};
5340
5341MODULE_LICENSE("Dual BSD/GPL");
5342MODULE_AUTHOR("Freescale Semiconductor, Inc");
5343MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5344
5345module_fsl_mc_driver(dpaa2_caam_driver);