/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};

static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return ctx->dev->u_ctx;
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}

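/*
 * Count how many SGE entries are needed to map @reqlen bytes of @sg,
 * skipping the first @skip bytes and splitting each DMA segment into
 * pieces of at most @entlen bytes.
 */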
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

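/*
 * Software tag verification: compare the authentication tag computed by
 * the hardware (returned in the CPL_FW6_PLD message) against the tag
 * supplied with the request and report -EBADMSG on mismatch.
 */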
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {

		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}

static inline void chcr_handle_aead_resp(struct aead_request *req,
					 unsigned char *input,
					 int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));

	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
	if (reqctx->b0_dma)
		dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
				 reqctx->b0_len, DMA_BIDIRECTIONAL);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	req->base.complete(&req->base, err);
}

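/*
 * Run the AES key expansion on @key and write the final round-key words
 * into @dec_key.  The hardware uses this "reverse round" key to perform
 * decryption for CBC mode.
 */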
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}

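/*
 * Hash a single block (the HMAC ipad/opad) with the software shash and
 * export the resulting intermediate state into @result_hash, so it can
 * later be loaded into the hardware as a partial hash.
 */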
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}

static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}

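/*
 * The dsgl_walk_*() helpers build the CPL_RX_PHYS_DSGL destination
 * gather list that tells the hardware where to DMA its output; entries
 * are packed eight address/length pairs per phys_sge_pairs block.
 */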
static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t *addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(*addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}

static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}


static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t *addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(*addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}

static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.crypto);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}

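/*
 * chcr_hash_ent_in_wr()/chcr_sg_ent_in_wr() walk the source (and, for
 * ciphers, destination) scatterlists and return how many bytes can be
 * packed into a single work request without exceeding @space, using the
 * sgl_ent_len/dsgl_ent_len tables to account for SGE entry overhead.
 */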
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}

static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}

	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}

	}
	return min(srclen, dstlen);
}

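/*
 * Run a request through the software skcipher fallback.  Used when the
 * request (or its remaining part) cannot be handled by the hardware,
 * e.g. when no bytes could be scheduled into a work request.
 */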
static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
				u32 flags,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes,
				u8 *iv,
				unsigned short op_type)
{
	int err;

	SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
	skcipher_request_set_tfm(subreq, cipher);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst,
				   nbytes, iv);

	err = op_type ? crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;

}
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];


	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
				!!lcb, ctx->tx_qidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
						       qid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));

	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@req: cipher req.
 *	@ctx: crypto driver context of the request.
 *	@qid: ingress qid where response of this WR should be received.
 *	@op_type:	encryption or decryption
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_blkcipher_req_ctx *reqctx =
		ablkcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(c_ctx(tfm)->dev);

	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents + 1);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(IV + wrparam->req->nbytes, 16) :
			     (sgl_len(nents + MIN_CIPHER_SG) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							 ablkctx->ciph_mode,
							 0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
								   0, 0, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len
		+ (reqctx->imm ? (IV + wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->info, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}
static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	int err = 0;

	crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |=
		crypto_skcipher_get_flags(ablkctx->sw_cipher) &
		CRYPTO_TFM_RES_MASK;
	return err;
}

static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
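/*
 * ctr_add_iv(): add @add to the big-endian counter block in @srciv,
 * writing the result to @dstiv with carry propagation across the
 * 128-bit block.  adjust_ctr_overflow(): clamp @bytes so that the low
 * 32-bit counter word does not wrap within a single work request.
 */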
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}

}

static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	/* Number of blocks that can be processed without overflow */
	c = (u64)temp + 1;
	if ((bytes / AES_BLOCK_SIZE) > c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}

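/*
 * Advance the XTS tweak for the next chunk of a request that spans
 * multiple work requests: multiply the current (encrypted) tweak by x
 * in GF(2^128) once per block already processed, and for intermediate
 * chunks decrypt it back so the hardware can re-derive it.
 */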
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct crypto_cipher *cipher;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	cipher = ablkctx->aes_generic;
	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret)
		goto out;
	/* H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0 */
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		crypto_cipher_decrypt_one(cipher, iv, iv);
out:
	return ret;
}

static int chcr_update_cipher_iv(struct ablkcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* Updated before sending last WR */
			memcpy(iv, req->info, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;

}

/* A separate function is needed for the final IV because for RFC3686 the
 * initial counter starts at 1 and the IV buffer is only 8 bytes, which
 * stays constant across subsequent update requests.
 */

static int chcr_final_cipher_iv(struct ablkcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for Decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);

	}
	return ret;

}

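/*
 * Completion handler for cipher work requests: update the IV from the
 * response, and either finish the request or build and send the next
 * work request for the bytes still to be processed.
 */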
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb;
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct cipher_wr_param wrparam;
	int bytes;

	if (err)
		goto unmap;
	if (req->nbytes == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
		goto complete;
	}

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			goto unmap;
		}

	}
	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->nbytes - reqctx->processed;
	}
	dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				   reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   req->info,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	req->base.complete(&req->base, err);
	return err;
}

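/*
 * Validate a cipher request, map it for DMA, decide whether it can be
 * carried as immediate data or how many bytes fit into the first work
 * request, set up the IV, and build that WR (falling back to the
 * software cipher when nothing can be scheduled).
 */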
static int process_cipher(struct ablkcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;

	reqctx->processed = 0;
	if (!req->info)
		goto error;
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->nbytes == 0) ||
	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->nbytes, ivsize);
		goto error;
	}
	chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					     AES_MIN_KEY_SIZE +
					     sizeof(struct cpl_rx_phys_dsgl) +
					     /* Min dsgl size */
					     32))) {
		/* Can be sent as Imm */
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->nbytes,
				       CHCR_DST_SG_SIZE, 0);
		dnents += 1; // IV
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->nbytes;

	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst,
					  MIN_CIPHER_SG,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->nbytes;
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->info, bytes);
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);

	} else {

		memcpy(reqctx->iv, req->info, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   reqctx->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}

static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct sk_buff *skb = NULL;
	int err;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct sk_buff *skb = NULL;
	int err;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	struct adapter *adap;
	unsigned int id;
	int txq_perchan, txq_idx, ntxq;
	int err = 0, rxq_perchan, rxq_idx;

	id = smp_processor_id();
	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = u_ctx->dev;
		adap = padap(ctx->dev);
		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
				    adap->vres.ncrypto_fc);
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
		txq_idx += id % txq_perchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->rx_qidx = rxq_idx;
		ctx->tx_qidx = txq_idx;
		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
		ctx->dev->rx_channel_id = 0;
		spin_unlock(&ctx->dev->lock_chcr_dev);
	}
out:
	return err;
}

static int chcr_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}

	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
		/* To update tweak */
		ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
		if (IS_ERR(ablkctx->aes_generic)) {
			pr_err("failed to allocate aes cipher for tweak\n");
			return PTR_ERR(ablkctx->aes_generic);
		}
	} else
		ablkctx->aes_generic = NULL;

	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_rfc3686_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/* RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
	 * cannot be used as fallback in chcr_handle_cipher_response
	 */
	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}


static void chcr_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_skcipher(ablkctx->sw_cipher);
	if (ablkctx->aes_generic)
		crypto_free_cipher(ablkctx->aes_generic);
}

static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}

/**
 *	create_hash_wr - Create hash work request
 *	@req - hash request
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;

	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					param->alg_prm.mk_size, 0,
					param->opad_needed,
					((param->kctx_len +
					 sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the maximum WR size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}

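/*
 * .update() for the chcr ahash: input shorter than a block is only
 * buffered; otherwise as much block-aligned data as fits in one work
 * request is sent to the hardware and the sub-block remainder is
 * copied into the request buffer for the next call.
 */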
static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error)
		return -ENOMEM;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len > req->nbytes)
		params.sg_len = req->nbytes;
	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
			req_ctx->reqlen;
	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	req_ctx->hctx_wr.srcsg = req->src;

	params.hash_size = params.alg_prm.result_size;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}

	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);

	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
	return error;
}

static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}

static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = NULL;
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_init_hctx_per_wr(req_ctx);
	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.opad_needed = 1;
		params.kctx_len *= 2;
	} else {
		params.opad_needed = 0;
	}

	req_ctx->hctx_wr.result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.srcsg = req->src;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;

	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	req_ctx->reqlen = 0;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

1705static int chcr_ahash_finup(struct ahash_request *req)
1706{
1707 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1708 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
324429d7
HS
1709 struct uld_ctx *u_ctx = NULL;
1710 struct sk_buff *skb;
1711 struct hash_wr_param params;
1712 u8 bs;
2f47d580 1713 int error;
324429d7
HS
1714
1715 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2f47d580 1716 u_ctx = ULD_CTX(h_ctx(rtfm));
324429d7
HS
1717
1718 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
2f47d580 1719 h_ctx(rtfm)->tx_qidx))) {
324429d7
HS
1720 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1721 return -EBUSY;
1722 }
5110e655
HJ
1723 chcr_init_hctx_per_wr(req_ctx);
1724 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1725 if (error)
1726 return -ENOMEM;
324429d7 1727
5110e655
HJ
1728 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1729 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1730 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1731 params.kctx_len *= 2;
324429d7 1732 params.opad_needed = 1;
5110e655 1733 } else {
324429d7 1734 params.opad_needed = 0;
5110e655 1735 }
324429d7 1736
5110e655
HJ
1737 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1738 HASH_SPACE_LEFT(params.kctx_len), 0);
1739 if (params.sg_len < req->nbytes) {
1740 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1741 params.kctx_len /= 2;
1742 params.opad_needed = 0;
1743 }
1744 params.last = 0;
1745 params.more = 1;
1746 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1747 - req_ctx->reqlen;
1748 params.hash_size = params.alg_prm.result_size;
1749 params.scmd1 = 0;
1750 } else {
1751 params.last = 1;
1752 params.more = 0;
1753 params.sg_len = req->nbytes;
1754 params.hash_size = crypto_ahash_digestsize(rtfm);
1755 params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1756 params.sg_len;
1757 }
44fce12a 1758 params.bfr_len = req_ctx->reqlen;
324429d7 1759 req_ctx->data_len += params.bfr_len + params.sg_len;
5110e655
HJ
1760 req_ctx->hctx_wr.result = 1;
1761 req_ctx->hctx_wr.srcsg = req->src;
44fce12a
HJ
1762 if ((req_ctx->reqlen + req->nbytes) == 0) {
1763 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
324429d7
HS
1764 params.last = 0;
1765 params.more = 1;
1766 params.scmd1 = 0;
1767 params.bfr_len = bs;
324429d7 1768 }
358961d1 1769 skb = create_hash_wr(req, &params);
2f47d580
HJ
1770 if (IS_ERR(skb)) {
1771 error = PTR_ERR(skb);
1772 goto unmap;
1773 }
5110e655
HJ
1774 req_ctx->reqlen = 0;
1775 req_ctx->hctx_wr.processed += params.sg_len;
324429d7 1776 skb->dev = u_ctx->lldi.ports[0];
2f47d580 1777 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
324429d7
HS
1778 chcr_send_wr(skb);
1779
1780 return -EINPROGRESS;
2f47d580
HJ
1781unmap:
1782 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1783 return error;
324429d7
HS
1784}
1785
1786static int chcr_ahash_digest(struct ahash_request *req)
1787{
1788 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1789 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
324429d7
HS
1790 struct uld_ctx *u_ctx = NULL;
1791 struct sk_buff *skb;
1792 struct hash_wr_param params;
1793 u8 bs;
2f47d580 1794 int error;
324429d7
HS
1795
1796 rtfm->init(req);
1797 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1798
2f47d580 1799 u_ctx = ULD_CTX(h_ctx(rtfm));
324429d7 1800 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
2f47d580 1801 h_ctx(rtfm)->tx_qidx))) {
324429d7
HS
1802 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1803 return -EBUSY;
1804 }
1805
5110e655 1806 chcr_init_hctx_per_wr(req_ctx);
2f47d580
HJ
1807 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1808 if (error)
1809 return -ENOMEM;
324429d7 1810
324429d7 1811 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
5110e655
HJ
1812 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1813 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1814 params.kctx_len *= 2;
1815 params.opad_needed = 1;
1816 } else {
1817 params.opad_needed = 0;
1818 }
1819 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1820 HASH_SPACE_LEFT(params.kctx_len), 0);
1821 if (params.sg_len < req->nbytes) {
1822 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1823 params.kctx_len /= 2;
1824 params.opad_needed = 0;
1825 }
1826 params.last = 0;
1827 params.more = 1;
1828 params.scmd1 = 0;
1829 params.sg_len = rounddown(params.sg_len, bs);
1830 params.hash_size = params.alg_prm.result_size;
1831 } else {
1832 params.sg_len = req->nbytes;
1833 params.hash_size = crypto_ahash_digestsize(rtfm);
1834 params.last = 1;
1835 params.more = 0;
1836 params.scmd1 = req->nbytes + req_ctx->data_len;
1837
1838 }
1839 params.bfr_len = 0;
1840 req_ctx->hctx_wr.result = 1;
1841 req_ctx->hctx_wr.srcsg = req->src;
324429d7
HS
1842 req_ctx->data_len += params.bfr_len + params.sg_len;
1843
44fce12a
HJ
1844 if (req->nbytes == 0) {
1845 create_last_hash_block(req_ctx->reqbfr, bs, 0);
324429d7
HS
1846 params.more = 1;
1847 params.bfr_len = bs;
1848 }
1849
358961d1 1850 skb = create_hash_wr(req, &params);
2f47d580
HJ
1851 if (IS_ERR(skb)) {
1852 error = PTR_ERR(skb);
1853 goto unmap;
1854 }
5110e655 1855 req_ctx->hctx_wr.processed += params.sg_len;
324429d7 1856 skb->dev = u_ctx->lldi.ports[0];
2f47d580 1857 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
324429d7
HS
1858 chcr_send_wr(skb);
1859 return -EINPROGRESS;
2f47d580
HJ
1860unmap:
1861 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1862 return error;
324429d7
HS
1863}
1864
6f76672b
HJ
1865static int chcr_ahash_continue(struct ahash_request *req)
1866{
1867 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1868 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1869 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1870 struct uld_ctx *u_ctx = NULL;
1871 struct sk_buff *skb;
1872 struct hash_wr_param params;
1873 u8 bs;
1874 int error;
1875
1876 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1877 u_ctx = ULD_CTX(h_ctx(rtfm));
1878 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1879 h_ctx(rtfm)->tx_qidx))) {
1880 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1881 return -EBUSY;
1882 }
1883 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1884 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1885 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1886 params.kctx_len *= 2;
1887 params.opad_needed = 1;
1888 } else {
1889 params.opad_needed = 0;
1890 }
1891 params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
1892 HASH_SPACE_LEFT(params.kctx_len),
1893 hctx_wr->src_ofst);
1894 if ((params.sg_len + hctx_wr->processed) > req->nbytes)
1895 params.sg_len = req->nbytes - hctx_wr->processed;
1896 if (!hctx_wr->result ||
1897 ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
1898 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1899 params.kctx_len /= 2;
1900 params.opad_needed = 0;
1901 }
1902 params.last = 0;
1903 params.more = 1;
1904 params.sg_len = rounddown(params.sg_len, bs);
1905 params.hash_size = params.alg_prm.result_size;
1906 params.scmd1 = 0;
1907 } else {
1908 params.last = 1;
1909 params.more = 0;
1910 params.hash_size = crypto_ahash_digestsize(rtfm);
1911 params.scmd1 = reqctx->data_len + params.sg_len;
1912 }
1913 params.bfr_len = 0;
1914 reqctx->data_len += params.sg_len;
1915 skb = create_hash_wr(req, &params);
1916 if (IS_ERR(skb)) {
1917 error = PTR_ERR(skb);
1918 goto err;
1919 }
1920 hctx_wr->processed += params.sg_len;
1921 skb->dev = u_ctx->lldi.ports[0];
1922 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1923 chcr_send_wr(skb);
1924 return 0;
1925err:
1926 return error;
1927}
1928
1929static inline void chcr_handle_ahash_resp(struct ahash_request *req,
1930 unsigned char *input,
1931 int err)
1932{
1933 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1934 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1935 int digestsize, updated_digestsize;
1936 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1937 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1938
1939 if (input == NULL)
1940 goto out;
1941 digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
1942 updated_digestsize = digestsize;
1943 if (digestsize == SHA224_DIGEST_SIZE)
1944 updated_digestsize = SHA256_DIGEST_SIZE;
1945 else if (digestsize == SHA384_DIGEST_SIZE)
1946 updated_digestsize = SHA512_DIGEST_SIZE;
1947
1948 if (hctx_wr->dma_addr) {
1949 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
1950 hctx_wr->dma_len, DMA_TO_DEVICE);
1951 hctx_wr->dma_addr = 0;
1952 }
1953 if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
1954 req->nbytes)) {
1955 if (hctx_wr->result == 1) {
1956 hctx_wr->result = 0;
1957 memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
1958 digestsize);
1959 } else {
1960 memcpy(reqctx->partial_hash,
1961 input + sizeof(struct cpl_fw6_pld),
1962 updated_digestsize);
1963
1964 }
1965 goto unmap;
1966 }
1967 memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
1968 updated_digestsize);
1969
1970 err = chcr_ahash_continue(req);
1971 if (err)
1972 goto unmap;
1973 return;
1974unmap:
1975 if (hctx_wr->is_sg_map)
1976 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1977
1978
1979out:
1980 req->base.complete(&req->base, err);
1981}
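/*
 * Flow summary: each work request covers at most HASH_SPACE_LEFT bytes of
 * source data. When a response arrives with data still pending, the
 * partial hash is saved and chcr_ahash_continue() issues the next request
 * from hctx_wr->srcsg / src_ofst until the whole scatterlist is consumed.
 */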
1982
1983/*
1984 * chcr_handle_resp - Unmap the DMA buffers associated with the request
1985 * @req: crypto request
1986 */
1987int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
1988 int err)
1989{
1990 struct crypto_tfm *tfm = req->tfm;
1991 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1992 struct adapter *adap = padap(ctx->dev);
1993
1994 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
1995 case CRYPTO_ALG_TYPE_AEAD:
1996 chcr_handle_aead_resp(aead_request_cast(req), input, err);
1997 break;
1998
1999 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2000 err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
2001 input, err);
2002 break;
2003
2004 case CRYPTO_ALG_TYPE_AHASH:
2005 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2006 }
2007 atomic_inc(&adap->chcr_stats.complete);
2008 return err;
2009}
324429d7
HS
2010static int chcr_ahash_export(struct ahash_request *areq, void *out)
2011{
2012 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2013 struct chcr_ahash_req_ctx *state = out;
2014
44fce12a 2015 state->reqlen = req_ctx->reqlen;
324429d7 2016 state->data_len = req_ctx->data_len;
44fce12a 2017 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
324429d7
HS
2018 memcpy(state->partial_hash, req_ctx->partial_hash,
2019 CHCR_HASH_MAX_DIGEST_SIZE);
5110e655 2020 chcr_init_hctx_per_wr(state);
44fce12a 2021 return 0;
324429d7
HS
2022}
2023
2024static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2025{
2026 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2027 struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2028
44fce12a 2029 req_ctx->reqlen = state->reqlen;
324429d7 2030 req_ctx->data_len = state->data_len;
44fce12a
HJ
2031 req_ctx->reqbfr = req_ctx->bfr1;
2032 req_ctx->skbfr = req_ctx->bfr2;
2033 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
324429d7
HS
2034 memcpy(req_ctx->partial_hash, state->partial_hash,
2035 CHCR_HASH_MAX_DIGEST_SIZE);
5110e655 2036 chcr_init_hctx_per_wr(req_ctx);
324429d7
HS
2037 return 0;
2038}
2039
2040static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2041 unsigned int keylen)
2042{
2f47d580 2043 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
324429d7
HS
2044 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2045 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2046 unsigned int i, err = 0, updated_digestsize;
2047
e7922729
HJ
2048 SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2049
2050 /* use the key to calculate the ipad and opad. ipad will be sent with the
324429d7
HS
2051 * first request's data. opad will be sent with the final hash result.
2052 * ipad is stored in hmacctx->ipad and opad in hmacctx->opad.
2053 */
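/*
 * A minimal sketch of the key schedule computed below, assuming IPAD_DATA
 * and OPAD_DATA are the usual repeated 0x36/0x5c words:
 *
 *	pad  = key zero-padded to the block size bs
 *	ipad = pad ^ 0x36..36;  opad = pad ^ 0x5c..5c
 *	hmacctx->ipad = compress(init_state, ipad)	(one block only)
 *	hmacctx->opad = compress(init_state, opad)
 *
 * Only these intermediate states are kept; the hardware resumes hashing
 * the data from the ipad state and finishes the outer hash from opad.
 */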
e7922729
HJ
2054 shash->tfm = hmacctx->base_hash;
2055 shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
324429d7 2056 if (keylen > bs) {
e7922729 2057 err = crypto_shash_digest(shash, key, keylen,
324429d7
HS
2058 hmacctx->ipad);
2059 if (err)
2060 goto out;
2061 keylen = digestsize;
2062 } else {
2063 memcpy(hmacctx->ipad, key, keylen);
2064 }
2065 memset(hmacctx->ipad + keylen, 0, bs - keylen);
2066 memcpy(hmacctx->opad, hmacctx->ipad, bs);
2067
2068 for (i = 0; i < bs / sizeof(int); i++) {
2069 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2070 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2071 }
2072
2073 updated_digestsize = digestsize;
2074 if (digestsize == SHA224_DIGEST_SIZE)
2075 updated_digestsize = SHA256_DIGEST_SIZE;
2076 else if (digestsize == SHA384_DIGEST_SIZE)
2077 updated_digestsize = SHA512_DIGEST_SIZE;
e7922729 2078 err = chcr_compute_partial_hash(shash, hmacctx->ipad,
324429d7
HS
2079 hmacctx->ipad, digestsize);
2080 if (err)
2081 goto out;
2082 chcr_change_order(hmacctx->ipad, updated_digestsize);
2083
e7922729 2084 err = chcr_compute_partial_hash(shash, hmacctx->opad,
324429d7
HS
2085 hmacctx->opad, digestsize);
2086 if (err)
2087 goto out;
2088 chcr_change_order(hmacctx->opad, updated_digestsize);
2089out:
2090 return err;
2091}
2092
b8fd1f41 2093static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
324429d7
HS
2094 unsigned int key_len)
2095{
2f47d580 2096 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
324429d7 2097 unsigned short context_size = 0;
b8fd1f41 2098 int err;
324429d7 2099
b8fd1f41
HJ
2100 err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2101 if (err)
2102 goto badkey_err;
cc1b156d
HJ
2103
2104 memcpy(ablkctx->key, key, key_len);
2105 ablkctx->enckey_len = key_len;
2106 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2107 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2108 ablkctx->key_ctx_hdr =
2109 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2110 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2111 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2112 CHCR_KEYCTX_NO_KEY, 1,
2113 0, context_size);
2114 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2115 return 0;
b8fd1f41
HJ
2116badkey_err:
2117 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2118 ablkctx->enckey_len = 0;
2119
2120 return err;
324429d7
HS
2121}
2122
2123static int chcr_sha_init(struct ahash_request *areq)
2124{
2125 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2126 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2127 int digestsize = crypto_ahash_digestsize(tfm);
2128
2129 req_ctx->data_len = 0;
44fce12a
HJ
2130 req_ctx->reqlen = 0;
2131 req_ctx->reqbfr = req_ctx->bfr1;
2132 req_ctx->skbfr = req_ctx->bfr2;
324429d7 2133 copy_hash_init_values(req_ctx->partial_hash, digestsize);
5110e655 2134
324429d7
HS
2135 return 0;
2136}
2137
2138static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2139{
2140 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2141 sizeof(struct chcr_ahash_req_ctx));
2142 return chcr_device_init(crypto_tfm_ctx(tfm));
2143}
2144
2145static int chcr_hmac_init(struct ahash_request *areq)
2146{
2147 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2148 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2f47d580 2149 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
324429d7
HS
2150 unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2151 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2152
2153 chcr_sha_init(areq);
2154 req_ctx->data_len = bs;
2155 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2156 if (digestsize == SHA224_DIGEST_SIZE)
2157 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2158 SHA256_DIGEST_SIZE);
2159 else if (digestsize == SHA384_DIGEST_SIZE)
2160 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2161 SHA512_DIGEST_SIZE);
2162 else
2163 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2164 digestsize);
2165 }
2166 return 0;
2167}
2168
2169static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2170{
2171 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2172 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2173 unsigned int digestsize =
2174 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2175
2176 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2177 sizeof(struct chcr_ahash_req_ctx));
e7922729
HJ
2178 hmacctx->base_hash = chcr_alloc_shash(digestsize);
2179 if (IS_ERR(hmacctx->base_hash))
2180 return PTR_ERR(hmacctx->base_hash);
324429d7
HS
2181 return chcr_device_init(crypto_tfm_ctx(tfm));
2182}
2183
324429d7
HS
2184static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2185{
2186 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2187 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2188
e7922729
HJ
2189 if (hmacctx->base_hash) {
2190 chcr_free_shash(hmacctx->base_hash);
2191 hmacctx->base_hash = NULL;
324429d7
HS
2192 }
2193}
2194
2f47d580
HJ
2195static int chcr_aead_common_init(struct aead_request *req,
2196 unsigned short op_type)
2debd332 2197{
2f47d580
HJ
2198 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2199 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2200 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2201 int error = -EINVAL;
2f47d580 2202 unsigned int authsize = crypto_aead_authsize(tfm);
2debd332 2203
2f47d580
HJ
2204 /* validate key size */
2205 if (aeadctx->enckey_len == 0)
2206 goto err;
2207 if (op_type && req->cryptlen < authsize)
2208 goto err;
2209 error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2210 op_type);
2211 if (error) {
2212 error = -ENOMEM;
2213 goto err;
2214 }
2215 reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
2216 CHCR_SRC_SG_SIZE, 0);
2217 reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
2218 CHCR_SRC_SG_SIZE, req->assoclen);
2219 return 0;
2220err:
2221 return error;
2debd332 2222}
2f47d580
HJ
2223
2224static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
0e93708d
HJ
2225 int aadmax, int wrlen,
2226 unsigned short op_type)
2227{
2228 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2229
2230 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2f47d580 2231 dst_nents > MAX_DSGL_ENT ||
0e93708d 2232 (req->assoclen > aadmax) ||
2f47d580 2233 (wrlen > SGE_MAX_WR_LEN))
0e93708d
HJ
2234 return 1;
2235 return 0;
2236}
2debd332 2237
0e93708d
HJ
2238static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2239{
2240 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2f47d580 2241 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
0e93708d
HJ
2242 struct aead_request *subreq = aead_request_ctx(req);
2243
2244 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2245 aead_request_set_callback(subreq, req->base.flags,
2246 req->base.complete, req->base.data);
2247 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2248 req->iv);
2249 aead_request_set_ad(subreq, req->assoclen);
2250 return op_type ? crypto_aead_decrypt(subreq) :
2251 crypto_aead_encrypt(subreq);
2252}
2debd332
HJ
2253
2254static struct sk_buff *create_authenc_wr(struct aead_request *req,
2255 unsigned short qid,
2256 int size,
2257 unsigned short op_type)
2258{
2259 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2f47d580 2260 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
2261 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2262 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2263 struct sk_buff *skb = NULL;
2264 struct chcr_wr *chcr_req;
2265 struct cpl_rx_phys_dsgl *phys_cpl;
2f47d580
HJ
2266 struct ulptx_sgl *ulptx;
2267 unsigned int transhdr_len;
3d64bd67 2268 unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2f47d580 2269 unsigned int kctx_len = 0, dnents;
2debd332
HJ
2270 unsigned int assoclen = req->assoclen;
2271 unsigned int authsize = crypto_aead_authsize(tfm);
2f47d580 2272 int error = -EINVAL;
2debd332
HJ
2273 int null = 0;
2274 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2275 GFP_ATOMIC;
2f47d580 2276 struct adapter *adap = padap(a_ctx(tfm)->dev);
2debd332 2277
2f47d580
HJ
2278 if (req->cryptlen == 0)
2279 return NULL;
2debd332 2280
2f47d580 2281 reqctx->b0_dma = 0;
3d64bd67
HJ
2282 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2283 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2debd332
HJ
2284 null = 1;
2285 assoclen = 0;
2286 }
2f47d580
HJ
2287 error = chcr_aead_common_init(req, op_type);
2288 if (error)
2289 return ERR_PTR(error);
5abc8db0
HJ
2290 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2291 dnents += sg_nents_xlen(req->dst, req->cryptlen +
2292 (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
2293 req->assoclen);
2294 dnents += MIN_AUTH_SG; // For IV
2f47d580
HJ
2295
2296 dst_size = get_space_for_phys_dsgl(dnents);
2debd332
HJ
2297 kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2298 - sizeof(chcr_req->key_ctx);
2299 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2f47d580
HJ
2300 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
2301 SGE_MAX_WR_LEN;
125d01ca
HJ
2302 temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
2303 : (sgl_len(reqctx->src_nents + reqctx->aad_nents
2f47d580
HJ
2304 + MIN_GCM_SG) * 8);
2305 transhdr_len += temp;
125d01ca 2306 transhdr_len = roundup(transhdr_len, 16);
2f47d580
HJ
2307
2308 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2309 transhdr_len, op_type)) {
ee0863ba 2310 atomic_inc(&adap->chcr_stats.fallback);
2f47d580
HJ
2311 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2312 op_type);
0e93708d
HJ
2313 return ERR_PTR(chcr_aead_fallback(req, op_type));
2314 }
2f47d580 2315 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
5fe8c711
HJ
2316 if (!skb) {
2317 error = -ENOMEM;
2debd332 2318 goto err;
5fe8c711 2319 }
2debd332 2320
de77b966 2321 chcr_req = __skb_put_zero(skb, transhdr_len);
2debd332 2322
2f47d580 2323 temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2debd332
HJ
2324
2325 /*
2326 * Input order is AAD, IV and payload, where the IV should be included
2327 * as part of the authdata. All other fields should be filled according
2328 * to the hardware spec.
2329 */
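/*
 * For example, with assoclen = 16 and IV = 16 the offsets programmed
 * below work out to: AAD at bytes 1..16, the IV inserted at offset 17
 * (assoclen + 1), and the cipher payload starting at byte 33
 * (assoclen + IV + 1), i.e. the AAD | IV | payload layout above.
 */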
2330 chcr_req->sec_cpl.op_ivinsrtofst =
2f47d580
HJ
2331 FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
2332 assoclen + 1);
2333 chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
2debd332
HJ
2334 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2335 assoclen ? 1 : 0, assoclen,
2f47d580
HJ
2336 assoclen + IV + 1,
2337 (temp & 0x1F0) >> 4);
2debd332 2338 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2f47d580
HJ
2339 temp & 0xF,
2340 null ? 0 : assoclen + IV + 1,
2341 temp, temp);
3d64bd67
HJ
2342 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2343 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2344 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2345 else
2346 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2debd332
HJ
2347 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2348 (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
3d64bd67 2349 temp,
2debd332 2350 actx->auth_mode, aeadctx->hmac_ctrl,
2f47d580 2351 IV >> 1);
2debd332 2352 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2f47d580 2353 0, 0, dst_size);
2debd332
HJ
2354
2355 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3d64bd67
HJ
2356 if (op_type == CHCR_ENCRYPT_OP ||
2357 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2358 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2debd332
HJ
2359 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2360 aeadctx->enckey_len);
2361 else
2362 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2363 aeadctx->enckey_len);
2364
125d01ca
HJ
2365 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2366 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
3d64bd67
HJ
2367 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2368 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2369 memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2370 memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
2371 CTR_RFC3686_IV_SIZE);
2372 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
2373 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2374 } else {
2375 memcpy(reqctx->iv, req->iv, IV);
2376 }
2debd332 2377 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2f47d580
HJ
2378 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2379 chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2380 chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
ee0863ba 2381 atomic_inc(&adap->chcr_stats.cipher_rqst);
2f47d580
HJ
2382 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2383 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
2384 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2385 transhdr_len, temp, 0);
2debd332 2386 reqctx->skb = skb;
2f47d580 2387 reqctx->op = op_type;
2debd332
HJ
2388
2389 return skb;
2debd332 2390err:
2f47d580
HJ
2391 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2392 op_type);
2393
5fe8c711 2394 return ERR_PTR(error);
2debd332
HJ
2395}
2396
6dad4e8a
AG
2397int chcr_aead_dma_map(struct device *dev,
2398 struct aead_request *req,
2399 unsigned short op_type)
2f47d580
HJ
2400{
2401 int error;
2402 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2403 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2404 unsigned int authsize = crypto_aead_authsize(tfm);
2405 int dst_size;
2406
2407 dst_size = req->assoclen + req->cryptlen + (op_type ?
2408 -authsize : authsize);
2409 if (!req->cryptlen || !dst_size)
2410 return 0;
2411 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
2412 DMA_BIDIRECTIONAL);
2413 if (dma_mapping_error(dev, reqctx->iv_dma))
2414 return -ENOMEM;
2415
2416 if (req->src == req->dst) {
2417 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2418 DMA_BIDIRECTIONAL);
2419 if (!error)
2420 goto err;
2421 } else {
2422 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2423 DMA_TO_DEVICE);
2424 if (!error)
2425 goto err;
2426 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2427 DMA_FROM_DEVICE);
2428 if (!error) {
2429 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2430 DMA_TO_DEVICE);
2431 goto err;
2432 }
2433 }
2434
2435 return 0;
2436err:
2437 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2438 return -ENOMEM;
2439}
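/*
 * Note that dma_map_sg() returns the number of mapped entries and 0 on
 * failure, so the "if (!error)" checks above treat a zero return as the
 * error case even though the variable is named 'error'.
 */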
2440
6dad4e8a
AG
2441void chcr_aead_dma_unmap(struct device *dev,
2442 struct aead_request *req,
2443 unsigned short op_type)
2f47d580
HJ
2444{
2445 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2446 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2447 unsigned int authsize = crypto_aead_authsize(tfm);
2448 int dst_size;
2449
2450 dst_size = req->assoclen + req->cryptlen + (op_type ?
2451 -authsize : authsize);
2452 if (!req->cryptlen || !dst_size)
2453 return;
2454
2455 dma_unmap_single(dev, reqctx->iv_dma, IV,
2456 DMA_BIDIRECTIONAL);
2457 if (req->src == req->dst) {
2458 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2459 DMA_BIDIRECTIONAL);
2460 } else {
2461 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2462 DMA_TO_DEVICE);
2463 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2464 DMA_FROM_DEVICE);
2465 }
2466}
2467
6dad4e8a
AG
2468void chcr_add_aead_src_ent(struct aead_request *req,
2469 struct ulptx_sgl *ulptx,
2470 unsigned int assoclen,
2471 unsigned short op_type)
2f47d580
HJ
2472{
2473 struct ulptx_walk ulp_walk;
2474 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2475
2476 if (reqctx->imm) {
2477 u8 *buf = (u8 *)ulptx;
2478
2479 if (reqctx->b0_dma) {
2480 memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2481 buf += reqctx->b0_len;
2482 }
2483 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2484 buf, assoclen, 0);
2485 buf += assoclen;
2486 memcpy(buf, reqctx->iv, IV);
2487 buf += IV;
2488 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2489 buf, req->cryptlen, req->assoclen);
2490 } else {
2491 ulptx_walk_init(&ulp_walk, ulptx);
2492 if (reqctx->b0_dma)
2493 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2494 &reqctx->b0_dma);
2495 ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
2496 ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2497 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
2498 req->assoclen);
2499 ulptx_walk_end(&ulp_walk);
2500 }
2501}
2502
6dad4e8a
AG
2503void chcr_add_aead_dst_ent(struct aead_request *req,
2504 struct cpl_rx_phys_dsgl *phys_cpl,
2505 unsigned int assoclen,
2506 unsigned short op_type,
2507 unsigned short qid)
2f47d580
HJ
2508{
2509 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2510 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2511 struct dsgl_walk dsgl_walk;
2512 unsigned int authsize = crypto_aead_authsize(tfm);
2513 u32 temp;
2514
2515 dsgl_walk_init(&dsgl_walk, phys_cpl);
2516 if (reqctx->b0_dma)
2517 dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
2518 dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
2519 dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2520 temp = req->cryptlen + (op_type ? -authsize : authsize);
2521 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
2522 dsgl_walk_end(&dsgl_walk, qid);
2523}
2524
6dad4e8a
AG
2525void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2526 struct ulptx_sgl *ulptx,
2527 struct cipher_wr_param *wrparam)
2f47d580
HJ
2528{
2529 struct ulptx_walk ulp_walk;
2530 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2531
2532 if (reqctx->imm) {
2533 u8 *buf = (u8 *)ulptx;
2534
2535 memcpy(buf, reqctx->iv, IV);
2536 buf += IV;
2537 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2538 buf, wrparam->bytes, reqctx->processed);
2539 } else {
2540 ulptx_walk_init(&ulp_walk, ulptx);
2541 ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2542 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2543 reqctx->src_ofst);
2544 reqctx->srcsg = ulp_walk.last_sg;
2545 reqctx->src_ofst = ulp_walk.last_sg_len;
2546 ulptx_walk_end(&ulp_walk);
2547 }
2548}
2549
6dad4e8a
AG
2550void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2551 struct cpl_rx_phys_dsgl *phys_cpl,
2552 struct cipher_wr_param *wrparam,
2553 unsigned short qid)
2f47d580
HJ
2554{
2555 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2556 struct dsgl_walk dsgl_walk;
2557
2558 dsgl_walk_init(&dsgl_walk, phys_cpl);
2559 dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2560 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2561 reqctx->dst_ofst);
2562 reqctx->dstsg = dsgl_walk.last_sg;
2563 reqctx->dst_ofst = dsgl_walk.last_sg_len;
2564
2565 dsgl_walk_end(&dsgl_walk, qid);
2566}
2567
6dad4e8a
AG
2568void chcr_add_hash_src_ent(struct ahash_request *req,
2569 struct ulptx_sgl *ulptx,
2570 struct hash_wr_param *param)
2f47d580
HJ
2571{
2572 struct ulptx_walk ulp_walk;
2573 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2574
5110e655 2575 if (reqctx->hctx_wr.imm) {
2f47d580
HJ
2576 u8 *buf = (u8 *)ulptx;
2577
2578 if (param->bfr_len) {
2579 memcpy(buf, reqctx->reqbfr, param->bfr_len);
2580 buf += param->bfr_len;
2581 }
5110e655
HJ
2582
2583 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2584 sg_nents(reqctx->hctx_wr.srcsg), buf,
2585 param->sg_len, 0);
2f47d580
HJ
2586 } else {
2587 ulptx_walk_init(&ulp_walk, ulptx);
2588 if (param->bfr_len)
2589 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
5110e655
HJ
2590 &reqctx->hctx_wr.dma_addr);
2591 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2592 param->sg_len, reqctx->hctx_wr.src_ofst);
2593 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2594 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
db6deea4 2595 ulptx_walk_end(&ulp_walk);
2f47d580
HJ
2596 }
2597}
2598
6dad4e8a
AG
2599int chcr_hash_dma_map(struct device *dev,
2600 struct ahash_request *req)
2f47d580
HJ
2601{
2602 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2603 int error = 0;
2604
2605 if (!req->nbytes)
2606 return 0;
2607 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2608 DMA_TO_DEVICE);
2609 if (!error)
7814f552 2610 return -ENOMEM;
5110e655 2611 req_ctx->hctx_wr.is_sg_map = 1;
2f47d580
HJ
2612 return 0;
2613}
2614
6dad4e8a
AG
2615void chcr_hash_dma_unmap(struct device *dev,
2616 struct ahash_request *req)
2f47d580
HJ
2617{
2618 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2619
2620 if (!req->nbytes)
2621 return;
2622
2623 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2624 DMA_TO_DEVICE);
5110e655 2625 req_ctx->hctx_wr.is_sg_map = 0;
2f47d580
HJ
2626
2627}
2628
6dad4e8a
AG
2629int chcr_cipher_dma_map(struct device *dev,
2630 struct ablkcipher_request *req)
2f47d580
HJ
2631{
2632 int error;
2633 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2634
2635 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
2636 DMA_BIDIRECTIONAL);
2637 if (dma_mapping_error(dev, reqctx->iv_dma))
2638 return -ENOMEM;
2639
2640 if (req->src == req->dst) {
2641 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2642 DMA_BIDIRECTIONAL);
2643 if (!error)
2644 goto err;
2645 } else {
2646 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2647 DMA_TO_DEVICE);
2648 if (!error)
2649 goto err;
2650 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2651 DMA_FROM_DEVICE);
2652 if (!error) {
2653 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2654 DMA_TO_DEVICE);
2655 goto err;
2656 }
2657 }
2658
2659 return 0;
2660err:
2661 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2662 return -ENOMEM;
2663}
6dad4e8a
AG
2664
2665void chcr_cipher_dma_unmap(struct device *dev,
2666 struct ablkcipher_request *req)
2f47d580
HJ
2667{
2668 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2669
2670 dma_unmap_single(dev, reqctx->iv_dma, IV,
2671 DMA_BIDIRECTIONAL);
2672 if (req->src == req->dst) {
2673 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2674 DMA_BIDIRECTIONAL);
2675 } else {
2676 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2677 DMA_TO_DEVICE);
2678 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2679 DMA_FROM_DEVICE);
2680 }
2681}
2682
2debd332
HJ
2683static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2684{
2685 __be32 data;
2686
2687 memset(block, 0, csize);
2688 block += csize;
2689
2690 if (csize >= 4)
2691 csize = 4;
2692 else if (msglen > (unsigned int)(1 << (8 * csize)))
2693 return -EOVERFLOW;
2694
2695 data = cpu_to_be32(msglen);
2696 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2697
2698 return 0;
2699}
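/*
 * Example of the length encoding above: for a 5-byte length field
 * (csize = 5) and msglen = 1048 (0x418), the field is first zeroed,
 * csize is clamped to 4, and the low bytes of the big-endian length are
 * copied into the tail, giving 00 00 00 04 18. A message too large for a
 * 2-byte field (csize = 2) would instead return -EOVERFLOW.
 */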
2700
2701static void generate_b0(struct aead_request *req,
2702 struct chcr_aead_ctx *aeadctx,
2703 unsigned short op_type)
2704{
2705 unsigned int l, lp, m;
2706 int rc;
2707 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2708 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2709 u8 *b0 = reqctx->scratch_pad;
2710
2711 m = crypto_aead_authsize(aead);
2712
2713 memcpy(b0, reqctx->iv, 16);
2714
2715 lp = b0[0];
2716 l = lp + 1;
2717
2718 /* set m, bits 3-5 */
2719 *b0 |= (8 * ((m - 2) / 2));
2720
2721 /* set adata, bit 6, if associated data is used */
2722 if (req->assoclen)
2723 *b0 |= 64;
2724 rc = set_msg_len(b0 + 16 - l,
2725 (op_type == CHCR_DECRYPT_OP) ?
2726 req->cryptlen - m : req->cryptlen, l);
2727}
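/*
 * B0 flags example (RFC 3610 layout): with iv[0] = 3 (so L = 4), a
 * 16-byte tag (m = 16) and a non-zero assoclen, the flags byte becomes
 * 3 | 8 * ((16 - 2) / 2) | 64 = 0x7b, and set_msg_len() stores the
 * message length in the last 4 bytes of B0.
 */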
2728
2729static inline int crypto_ccm_check_iv(const u8 *iv)
2730{
2731 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2732 if (iv[0] < 1 || iv[0] > 7)
2733 return -EINVAL;
2734
2735 return 0;
2736}
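/*
 * iv[0] carries L' = L - 1, so the accepted values 1..7 correspond to
 * length-field widths L of 2..8 bytes and nonce lengths of 13 down to
 * 7 bytes within the 16-byte CCM IV.
 */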
2737
2738static int ccm_format_packet(struct aead_request *req,
2739 struct chcr_aead_ctx *aeadctx,
2740 unsigned int sub_type,
2741 unsigned short op_type)
2742{
2743 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2744 int rc = 0;
2745
2debd332
HJ
2746 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2747 reqctx->iv[0] = 3;
2748 memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
2749 memcpy(reqctx->iv + 4, req->iv, 8);
2750 memset(reqctx->iv + 12, 0, 4);
2751 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2752 htons(req->assoclen - 8);
2753 } else {
2754 memcpy(reqctx->iv, req->iv, 16);
2755 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2756 htons(req->assoclen);
2757 }
2758 generate_b0(req, aeadctx, op_type);
2759 /* zero the ctr value */
2760 memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
2761 return rc;
2762}
2763
2764static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2765 unsigned int dst_size,
2766 struct aead_request *req,
2f47d580 2767 unsigned short op_type)
2debd332
HJ
2768{
2769 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2f47d580 2770 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
2771 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2772 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2f47d580 2773 unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
2debd332
HJ
2774 unsigned int ccm_xtra;
2775 unsigned char tag_offset = 0, auth_offset = 0;
2debd332
HJ
2776 unsigned int assoclen;
2777
2778 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2779 assoclen = req->assoclen - 8;
2780 else
2781 assoclen = req->assoclen;
2782 ccm_xtra = CCM_B0_SIZE +
2783 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2784
2785 auth_offset = req->cryptlen ?
2f47d580 2786 (assoclen + IV + 1 + ccm_xtra) : 0;
2debd332
HJ
2787 if (op_type == CHCR_DECRYPT_OP) {
2788 if (crypto_aead_authsize(tfm) != req->cryptlen)
2789 tag_offset = crypto_aead_authsize(tfm);
2790 else
2791 auth_offset = 0;
2792 }
2793
2794
2795 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2f47d580 2796 2, assoclen + 1 + ccm_xtra);
2debd332 2797 sec_cpl->pldlen =
2f47d580 2798 htonl(assoclen + IV + req->cryptlen + ccm_xtra);
2debd332
HJ
2799 /* For CCM there will always be a b0 block, so AAD start will always be 1 */
2800 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2801 1, assoclen + ccm_xtra, assoclen
2f47d580 2802 + IV + 1 + ccm_xtra, 0);
2debd332
HJ
2803
2804 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2805 auth_offset, tag_offset,
2806 (op_type == CHCR_ENCRYPT_OP) ? 0 :
2807 crypto_aead_authsize(tfm));
2808 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2809 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
0a7bd30c 2810 cipher_mode, mac_mode,
2f47d580 2811 aeadctx->hmac_ctrl, IV >> 1);
2debd332
HJ
2812
2813 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2f47d580 2814 0, dst_size);
2debd332
HJ
2815}
2816
1efb892b
CIK
2817static int aead_ccm_validate_input(unsigned short op_type,
2818 struct aead_request *req,
2819 struct chcr_aead_ctx *aeadctx,
2820 unsigned int sub_type)
2debd332
HJ
2821{
2822 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2823 if (crypto_ccm_check_iv(req->iv)) {
2824 pr_err("CCM: IV check fails\n");
2825 return -EINVAL;
2826 }
2827 } else {
2828 if (req->assoclen != 16 && req->assoclen != 20) {
2829 pr_err("RFC4309: Invalid AAD length %d\n",
2830 req->assoclen);
2831 return -EINVAL;
2832 }
2833 }
2debd332
HJ
2834 return 0;
2835}
2836
2debd332
HJ
2837static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2838 unsigned short qid,
2839 int size,
2840 unsigned short op_type)
2841{
2842 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2f47d580 2843 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
2844 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2845 struct sk_buff *skb = NULL;
2846 struct chcr_wr *chcr_req;
2847 struct cpl_rx_phys_dsgl *phys_cpl;
2f47d580
HJ
2848 struct ulptx_sgl *ulptx;
2849 unsigned int transhdr_len;
2850 unsigned int dst_size = 0, kctx_len, dnents, temp;
2851 unsigned int sub_type, assoclen = req->assoclen;
2debd332 2852 unsigned int authsize = crypto_aead_authsize(tfm);
2f47d580 2853 int error = -EINVAL;
2debd332
HJ
2854 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2855 GFP_ATOMIC;
2f47d580 2856 struct adapter *adap = padap(a_ctx(tfm)->dev);
2debd332 2857
2f47d580
HJ
2858 reqctx->b0_dma = 0;
2859 sub_type = get_aead_subtype(tfm);
2860 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2861 assoclen -= 8;
2f47d580
HJ
2862 error = chcr_aead_common_init(req, op_type);
2863 if (error)
2864 return ERR_PTR(error);
0e93708d 2865
2f47d580
HJ
2866
2867 reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
5fe8c711
HJ
2868 error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
2869 if (error)
2debd332 2870 goto err;
e1a018e6
HJ
2871 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2872 dnents += sg_nents_xlen(req->dst, req->cryptlen
2873 + (op_type ? -authsize : authsize),
2874 CHCR_DST_SG_SIZE, req->assoclen);
2875 dnents += MIN_CCM_SG; // For IV and B0
2f47d580 2876 dst_size = get_space_for_phys_dsgl(dnents);
125d01ca 2877 kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2debd332 2878 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2f47d580
HJ
2879 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
2880 reqctx->b0_len) <= SGE_MAX_WR_LEN;
125d01ca
HJ
2881 temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
2882 reqctx->b0_len, 16) :
2f47d580
HJ
2883 (sgl_len(reqctx->src_nents + reqctx->aad_nents +
2884 MIN_CCM_SG) * 8);
2885 transhdr_len += temp;
125d01ca 2886 transhdr_len = roundup(transhdr_len, 16);
2f47d580
HJ
2887
2888 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2889 reqctx->b0_len, transhdr_len, op_type)) {
ee0863ba 2890 atomic_inc(&adap->chcr_stats.fallback);
2f47d580
HJ
2891 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2892 op_type);
0e93708d
HJ
2893 return ERR_PTR(chcr_aead_fallback(req, op_type));
2894 }
2f47d580 2895 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2debd332 2896
5fe8c711
HJ
2897 if (!skb) {
2898 error = -ENOMEM;
2debd332 2899 goto err;
5fe8c711 2900 }
2debd332 2901
2f47d580 2902 chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
2debd332 2903
2f47d580 2904 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);
2debd332
HJ
2905
2906 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2907 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
125d01ca
HJ
2908 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2909 aeadctx->key, aeadctx->enckey_len);
2debd332
HJ
2910
2911 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2f47d580 2912 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
5fe8c711
HJ
2913 error = ccm_format_packet(req, aeadctx, sub_type, op_type);
2914 if (error)
2debd332
HJ
2915 goto dstmap_fail;
2916
2f47d580
HJ
2917 reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
2918 &reqctx->scratch_pad, reqctx->b0_len,
2919 DMA_BIDIRECTIONAL);
2920 if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
2921 reqctx->b0_dma)) {
2922 error = -ENOMEM;
2debd332 2923 goto dstmap_fail;
2f47d580
HJ
2924 }
2925
2926 chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2927 chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
2debd332 2928
ee0863ba 2929 atomic_inc(&adap->chcr_stats.aead_rqst);
2f47d580
HJ
2930 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2931 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
2932 reqctx->b0_len) : 0);
2933 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2934 transhdr_len, temp, 0);
2debd332 2935 reqctx->skb = skb;
2f47d580
HJ
2936 reqctx->op = op_type;
2937
2debd332
HJ
2938 return skb;
2939dstmap_fail:
2940 kfree_skb(skb);
2debd332 2941err:
2f47d580 2942 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
5fe8c711 2943 return ERR_PTR(error);
2debd332
HJ
2944}
2945
2946static struct sk_buff *create_gcm_wr(struct aead_request *req,
2947 unsigned short qid,
2948 int size,
2949 unsigned short op_type)
2950{
2951 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2f47d580 2952 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
2953 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2954 struct sk_buff *skb = NULL;
2955 struct chcr_wr *chcr_req;
2956 struct cpl_rx_phys_dsgl *phys_cpl;
2f47d580
HJ
2957 struct ulptx_sgl *ulptx;
2958 unsigned int transhdr_len, dnents = 0;
2959 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2debd332 2960 unsigned int authsize = crypto_aead_authsize(tfm);
2f47d580 2961 int error = -EINVAL;
2debd332
HJ
2962 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2963 GFP_ATOMIC;
2f47d580 2964 struct adapter *adap = padap(a_ctx(tfm)->dev);
2debd332 2965
2f47d580
HJ
2966 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
2967 assoclen = req->assoclen - 8;
2debd332 2968
2f47d580 2969 reqctx->b0_dma = 0;
2f47d580 2970 error = chcr_aead_common_init(req, op_type);
e1a018e6
HJ
2971 if (error)
2972 return ERR_PTR(error);
2973 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2974 dnents += sg_nents_xlen(req->dst, req->cryptlen +
2975 (op_type ? -authsize : authsize),
2f47d580 2976 CHCR_DST_SG_SIZE, req->assoclen);
e1a018e6 2977 dnents += MIN_GCM_SG; // For IV
2f47d580 2978 dst_size = get_space_for_phys_dsgl(dnents);
125d01ca 2979 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
2debd332 2980 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2f47d580
HJ
2981 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
2982 SGE_MAX_WR_LEN;
125d01ca
HJ
2983 temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
2984 (sgl_len(reqctx->src_nents +
2985 reqctx->aad_nents + MIN_GCM_SG) * 8);
2f47d580 2986 transhdr_len += temp;
125d01ca 2987 transhdr_len = roundup(transhdr_len, 16);
2f47d580
HJ
2988 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2989 transhdr_len, op_type)) {
ee0863ba 2990 atomic_inc(&adap->chcr_stats.fallback);
2f47d580
HJ
2991 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2992 op_type);
0e93708d
HJ
2993 return ERR_PTR(chcr_aead_fallback(req, op_type));
2994 }
2f47d580 2995 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
5fe8c711
HJ
2996 if (!skb) {
2997 error = -ENOMEM;
2debd332 2998 goto err;
5fe8c711 2999 }
2debd332 3000
de77b966 3001 chcr_req = __skb_put_zero(skb, transhdr_len);
2debd332 3002
2f47d580
HJ
3003 // Offset of the tag from the end
3004 temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2debd332 3005 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
2f47d580
HJ
3006 a_ctx(tfm)->dev->rx_channel_id, 2,
3007 (assoclen + 1));
0e93708d 3008 chcr_req->sec_cpl.pldlen =
2f47d580 3009 htonl(assoclen + IV + req->cryptlen);
2debd332 3010 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
d600fc8a 3011 assoclen ? 1 : 0, assoclen,
2f47d580 3012 assoclen + IV + 1, 0);
e1a018e6 3013 chcr_req->sec_cpl.cipherstop_lo_authinsert =
2f47d580
HJ
3014 FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
3015 temp, temp);
e1a018e6 3016 chcr_req->sec_cpl.seqno_numivs =
2debd332
HJ
3017 FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
3018 CHCR_ENCRYPT_OP) ? 1 : 0,
3019 CHCR_SCMD_CIPHER_MODE_AES_GCM,
0a7bd30c 3020 CHCR_SCMD_AUTH_MODE_GHASH,
2f47d580 3021 aeadctx->hmac_ctrl, IV >> 1);
2debd332 3022 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2f47d580 3023 0, 0, dst_size);
2debd332
HJ
3024 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3025 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
125d01ca
HJ
3026 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3027 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
2debd332
HJ
3028
3029 /* prepare a 16 byte iv */
3030 /* S A L T | IV | 0x00000001 */
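/*
 * e.g. for rfc4106(gcm(aes)) the 4-byte salt taken from the key occupies
 * bytes 0..3 and the 8-byte per-request IV bytes 4..11; for plain
 * gcm(aes) the 12-byte IV fills bytes 0..11. Bytes 12..15 always carry
 * the initial counter value 0x00000001.
 */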
3031 if (get_aead_subtype(tfm) ==
3032 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3033 memcpy(reqctx->iv, aeadctx->salt, 4);
8f6acb7f 3034 memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
2debd332 3035 } else {
8f6acb7f 3036 memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
2debd332
HJ
3037 }
3038 *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
3039
3040 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2f47d580 3041 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2debd332 3042
2f47d580
HJ
3043 chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
3044 chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
ee0863ba 3045 atomic_inc(&adap->chcr_stats.aead_rqst);
2f47d580
HJ
3046 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
3047 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
3048 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3049 transhdr_len, temp, reqctx->verify);
2debd332 3050 reqctx->skb = skb;
2f47d580 3051 reqctx->op = op_type;
2debd332
HJ
3052 return skb;
3053
2debd332 3054err:
2f47d580 3055 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
5fe8c711 3056 return ERR_PTR(error);
2debd332
HJ
3057}
3058
3059
3060
3061static int chcr_aead_cra_init(struct crypto_aead *tfm)
3062{
2f47d580 3063 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
0e93708d
HJ
3064 struct aead_alg *alg = crypto_aead_alg(tfm);
3065
3066 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
5fe8c711
HJ
3067 CRYPTO_ALG_NEED_FALLBACK |
3068 CRYPTO_ALG_ASYNC);
0e93708d
HJ
3069 if (IS_ERR(aeadctx->sw_cipher))
3070 return PTR_ERR(aeadctx->sw_cipher);
3071 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3072 sizeof(struct aead_request) +
3073 crypto_aead_reqsize(aeadctx->sw_cipher)));
2f47d580 3074 return chcr_device_init(a_ctx(tfm));
2debd332
HJ
3075}
3076
3077static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3078{
2f47d580 3079 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
0e93708d 3080
0e93708d 3081 crypto_free_aead(aeadctx->sw_cipher);
2debd332
HJ
3082}
3083
3084static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3085 unsigned int authsize)
3086{
2f47d580 3087 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
3088
3089 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3090 aeadctx->mayverify = VERIFY_HW;
0e93708d 3091 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
3092}
3093static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3094 unsigned int authsize)
3095{
2f47d580 3096 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
3097 u32 maxauth = crypto_aead_maxauthsize(tfm);
3098
3099 /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2 does
3100 * not hold for SHA1, so the authsize == 12 check must come before the
3101 * authsize == (maxauth >> 1) check
3102 */
3103 if (authsize == ICV_4) {
3104 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3105 aeadctx->mayverify = VERIFY_HW;
3106 } else if (authsize == ICV_6) {
3107 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3108 aeadctx->mayverify = VERIFY_HW;
3109 } else if (authsize == ICV_10) {
3110 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3111 aeadctx->mayverify = VERIFY_HW;
3112 } else if (authsize == ICV_12) {
3113 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3114 aeadctx->mayverify = VERIFY_HW;
3115 } else if (authsize == ICV_14) {
3116 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3117 aeadctx->mayverify = VERIFY_HW;
3118 } else if (authsize == (maxauth >> 1)) {
3119 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3120 aeadctx->mayverify = VERIFY_HW;
3121 } else if (authsize == maxauth) {
3122 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3123 aeadctx->mayverify = VERIFY_HW;
3124 } else {
3125 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3126 aeadctx->mayverify = VERIFY_SW;
3127 }
0e93708d 3128 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
3129}
3130
3131
3132static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3133{
2f47d580 3134 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
3135
3136 switch (authsize) {
3137 case ICV_4:
3138 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3139 aeadctx->mayverify = VERIFY_HW;
3140 break;
3141 case ICV_8:
3142 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3143 aeadctx->mayverify = VERIFY_HW;
3144 break;
3145 case ICV_12:
3146 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3147 aeadctx->mayverify = VERIFY_HW;
3148 break;
3149 case ICV_14:
3150 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3151 aeadctx->mayverify = VERIFY_HW;
3152 break;
3153 case ICV_16:
3154 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3155 aeadctx->mayverify = VERIFY_HW;
3156 break;
3157 case ICV_13:
3158 case ICV_15:
3159 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3160 aeadctx->mayverify = VERIFY_SW;
3161 break;
3162 default:
3163
3164 crypto_tfm_set_flags((struct crypto_tfm *) tfm,
3165 CRYPTO_TFM_RES_BAD_KEY_LEN);
3166 return -EINVAL;
3167 }
0e93708d 3168 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
3169}
3170
3171static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3172 unsigned int authsize)
3173{
2f47d580 3174 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
3175
3176 switch (authsize) {
3177 case ICV_8:
3178 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3179 aeadctx->mayverify = VERIFY_HW;
3180 break;
3181 case ICV_12:
3182 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3183 aeadctx->mayverify = VERIFY_HW;
3184 break;
3185 case ICV_16:
3186 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3187 aeadctx->mayverify = VERIFY_HW;
3188 break;
3189 default:
3190 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3191 CRYPTO_TFM_RES_BAD_KEY_LEN);
3192 return -EINVAL;
3193 }
0e93708d 3194 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
3195}
3196
3197static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3198 unsigned int authsize)
3199{
2f47d580 3200 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
3201
3202 switch (authsize) {
3203 case ICV_4:
3204 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3205 aeadctx->mayverify = VERIFY_HW;
3206 break;
3207 case ICV_6:
3208 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3209 aeadctx->mayverify = VERIFY_HW;
3210 break;
3211 case ICV_8:
3212 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3213 aeadctx->mayverify = VERIFY_HW;
3214 break;
3215 case ICV_10:
3216 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3217 aeadctx->mayverify = VERIFY_HW;
3218 break;
3219 case ICV_12:
3220 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3221 aeadctx->mayverify = VERIFY_HW;
3222 break;
3223 case ICV_14:
3224 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3225 aeadctx->mayverify = VERIFY_HW;
3226 break;
3227 case ICV_16:
3228 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3229 aeadctx->mayverify = VERIFY_HW;
3230 break;
3231 default:
3232 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3233 CRYPTO_TFM_RES_BAD_KEY_LEN);
3234 return -EINVAL;
3235 }
0e93708d 3236 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
3237}
3238
0e93708d 3239static int chcr_ccm_common_setkey(struct crypto_aead *aead,
2debd332
HJ
3240 const u8 *key,
3241 unsigned int keylen)
3242{
2f47d580 3243 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
2debd332
HJ
3244 unsigned char ck_size, mk_size;
3245 int key_ctx_size = 0;
3246
125d01ca 3247 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
2debd332 3248 if (keylen == AES_KEYSIZE_128) {
2debd332 3249 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
125d01ca 3250 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
2debd332
HJ
3251 } else if (keylen == AES_KEYSIZE_192) {
3252 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3253 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3254 } else if (keylen == AES_KEYSIZE_256) {
3255 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3256 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3257 } else {
3258 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3259 CRYPTO_TFM_RES_BAD_KEY_LEN);
3260 aeadctx->enckey_len = 0;
3261 return -EINVAL;
3262 }
3263 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3264 key_ctx_size >> 4);
3265 memcpy(aeadctx->key, key, keylen);
3266 aeadctx->enckey_len = keylen;
3267
3268 return 0;
3269}
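/*
 * Illustrative sketch (not part of the driver): the hardware key context is
 * measured in 16-byte units, which is why key_ctx_size is shifted right by
 * four before FILL_KEY_CTX_HDR() packs it into the header above. The
 * stand-alone C below only mirrors that arithmetic; KEY_CTX_HDR_LEN is a
 * hypothetical stand-in for sizeof(struct _key_ctx).
 */
#include <stdio.h>

#define KEY_CTX_HDR_LEN 16	/* assumed header size, for illustration only */

static unsigned int roundup16(unsigned int v)
{
	return (v + 15) & ~15u;	/* same effect as roundup(v, 16) */
}

int main(void)
{
	static const unsigned int keylens[] = { 16, 24, 32 }; /* AES-128/192/256 */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int key_ctx_size = KEY_CTX_HDR_LEN +
					    roundup16(keylens[i]) * 2;

		printf("AES key %u bytes -> key_ctx_size %u bytes = %u x 16B\n",
		       keylens[i], key_ctx_size, key_ctx_size >> 4);
	}
	return 0;
}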
3270
3271static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3272 const u8 *key,
3273 unsigned int keylen)
3274{
2f47d580 3275 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3276 int error;
3277
3278 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3279 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3280 CRYPTO_TFM_REQ_MASK);
3281 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3282 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3283 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3284 CRYPTO_TFM_RES_MASK);
3285 if (error)
3286 return error;
3287 return chcr_ccm_common_setkey(aead, key, keylen);
3288}
3289
3290static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3291 unsigned int keylen)
3292{
2f47d580 3293 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
4dbeae42 3294 int error;
2debd332
HJ
3295
3296 if (keylen < 3) {
3297 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3298 CRYPTO_TFM_RES_BAD_KEY_LEN);
3299 aeadctx->enckey_len = 0;
3300 return -EINVAL;
3301 }
3302 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3303 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3304 CRYPTO_TFM_REQ_MASK);
3305 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3306 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3307 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3308 CRYPTO_TFM_RES_MASK);
3309 if (error)
3310 return error;
3311 keylen -= 3;
3312 memcpy(aeadctx->salt, key + keylen, 3);
0e93708d 3313 return chcr_ccm_common_setkey(aead, key, keylen);
3314}
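/*
 * Illustrative sketch (not part of the driver): for rfc4309(ccm(aes)) the key
 * blob handed to setkey is the AES key followed by a 3-byte salt, which is
 * exactly how the function above consumes it. The stand-alone snippet below
 * splits such a blob the same way; the 19-byte example key is made up.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char blob[19];		/* 16-byte AES-128 key + 3-byte salt */
	unsigned char aes_key[16];
	unsigned char salt[3];
	size_t keylen = sizeof(blob);
	size_t i;

	for (i = 0; i < sizeof(blob); i++)	/* dummy key material */
		blob[i] = (unsigned char)i;

	keylen -= 3;				/* same split as the driver */
	memcpy(salt, blob + keylen, 3);
	memcpy(aes_key, blob, keylen);

	printf("AES keylen %zu, salt %02x%02x%02x\n",
	       keylen, salt[0], salt[1], salt[2]);
	return 0;
}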
3315
3316static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3317 unsigned int keylen)
3318{
2f47d580 3319 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
2debd332 3320 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
8356ea51 3321 struct crypto_cipher *cipher;
3322 unsigned int ck_size;
3323 int ret = 0, key_ctx_size = 0;
3324
3325 aeadctx->enckey_len = 0;
3326 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3327 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3328 & CRYPTO_TFM_REQ_MASK);
3329 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3330 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3331 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3332 CRYPTO_TFM_RES_MASK);
3333 if (ret)
3334 goto out;
3335
3336 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3337 keylen > 3) {
3338 keylen -= 4; /* nonce/salt is present in the last 4 bytes */
3339 memcpy(aeadctx->salt, key + keylen, 4);
3340 }
3341 if (keylen == AES_KEYSIZE_128) {
3342 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3343 } else if (keylen == AES_KEYSIZE_192) {
3344 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3345 } else if (keylen == AES_KEYSIZE_256) {
3346 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3347 } else {
3348 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3349 CRYPTO_TFM_RES_BAD_KEY_LEN);
0e93708d 3350 pr_err("GCM: Invalid key length %d\n", keylen);
3351 ret = -EINVAL;
3352 goto out;
3353 }
3354
3355 memcpy(aeadctx->key, key, keylen);
3356 aeadctx->enckey_len = keylen;
125d01ca 3357 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
2debd332 3358 AEAD_H_SIZE;
125d01ca 3359 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3360 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3361 0, 0,
3362 key_ctx_size >> 4);
3363 /* Calculate the GHASH hash subkey H = CIPH(K, 0 repeated 16 times);
3364 * it is stored in the key context right after the AES key.
3365 */
3366 cipher = crypto_alloc_cipher("aes-generic", 0, 0);
3367 if (IS_ERR(cipher)) {
3368 aeadctx->enckey_len = 0;
3369 ret = -ENOMEM;
3370 goto out;
3371 }
3372
3373 ret = crypto_cipher_setkey(cipher, key, keylen);
3374 if (ret) {
3375 aeadctx->enckey_len = 0;
3376 goto out1;
3377 }
3378 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
8356ea51 3379 crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
3380
3381out1:
8356ea51 3382 crypto_free_cipher(cipher);
3383out:
3384 return ret;
3385}
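/*
 * Illustrative sketch (not part of the driver): gcm(aes) needs the GHASH hash
 * subkey H = AES_K(0^16); the function above derives it with a software AES
 * cipher and places it after the 16-byte-aligned AES key in the key context.
 * The layout below mirrors that arrangement; aes_encrypt_block() is a
 * hypothetical single-block AES primitive supplied by the reader, not a real
 * kernel or library API.
 */
#include <string.h>

void aes_encrypt_block(const unsigned char *key, unsigned int keylen,
		       const unsigned char *in, unsigned char *out); /* hypothetical */

/* Lay out [AES key padded to a 16-byte multiple][H]; returns bytes used. */
static unsigned int build_gcm_key_material(const unsigned char *key,
					   unsigned int keylen,
					   unsigned char *out)
{
	unsigned char zeroes[16] = { 0 };
	unsigned int padded = (keylen + 15) & ~15u;

	memset(out, 0, padded);
	memcpy(out, key, keylen);				/* AES key */
	aes_encrypt_block(key, keylen, zeroes, out + padded);	/* H */
	return padded + 16;
}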
3386
3387static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3388 unsigned int keylen)
3389{
2f47d580 3390 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3391 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3392 /* the key blob carries both the authentication and the cipher key */
3393 struct crypto_authenc_keys keys;
3d64bd67 3394 unsigned int bs, subtype;
3395 unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3396 int err = 0, i, key_ctx_len = 0;
3397 unsigned char ck_size = 0;
3398 unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
ec1bca94 3399 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3400 struct algo_param param;
3401 int align;
3402 u8 *o_ptr = NULL;
3403
3404 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3405 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3406 & CRYPTO_TFM_REQ_MASK);
3407 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3408 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3409 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3410 & CRYPTO_TFM_RES_MASK);
3411 if (err)
3412 goto out;
3413
3414 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3415 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3416 goto out;
3417 }
3418
3419 if (get_alg_config(&param, max_authsize)) {
3420 pr_err("chcr : Unsupported digest size\n");
3421 goto out;
3422 }
3423 subtype = get_aead_subtype(authenc);
3424 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3425 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3426 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3427 goto out;
3428 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3429 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3430 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3431 }
3432 if (keys.enckeylen == AES_KEYSIZE_128) {
3433 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3434 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3435 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3436 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3437 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3438 } else {
3439 pr_err("chcr : Unsupported cipher key\n");
3440 goto out;
3441 }
3442
3443 /* Copy only the encryption key. The authentication key is used below
3444 * only to derive h(ipad) and h(opad), so it does not have to be kept;
3445 * once derived, its effective length is the hash digest size.
3446 */
3447 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3448 aeadctx->enckey_len = keys.enckeylen;
3449 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3450 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
2debd332 3451
3452 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3453 aeadctx->enckey_len << 3);
3454 }
3455 base_hash = chcr_alloc_shash(max_authsize);
3456 if (IS_ERR(base_hash)) {
3457 pr_err("chcr : Base driver cannot be loaded\n");
3458 aeadctx->enckey_len = 0;
3459 return -EINVAL;
324429d7 3460 }
3461 {
3462 SHASH_DESC_ON_STACK(shash, base_hash);
3463 shash->tfm = base_hash;
3464 shash->flags = crypto_shash_get_flags(base_hash);
3465 bs = crypto_shash_blocksize(base_hash);
3466 align = KEYCTX_ALIGN_PAD(max_authsize);
3467 o_ptr = actx->h_iopad + param.result_size + align;
3468
3469 if (keys.authkeylen > bs) {
3470 err = crypto_shash_digest(shash, keys.authkey,
3471 keys.authkeylen,
3472 o_ptr);
3473 if (err) {
3474 pr_err("chcr : Hashing of the authentication key failed\n");
3475 goto out;
3476 }
3477 keys.authkeylen = max_authsize;
3478 } else
3479 memcpy(o_ptr, keys.authkey, keys.authkeylen);
3480
3481 /* Compute the ipad-digest*/
3482 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3483 memcpy(pad, o_ptr, keys.authkeylen);
3484 for (i = 0; i < bs >> 2; i++)
3485 *((unsigned int *)pad + i) ^= IPAD_DATA;
3486
3487 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3488 max_authsize))
3489 goto out;
3490 /* Compute the opad-digest */
3491 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3492 memcpy(pad, o_ptr, keys.authkeylen);
3493 for (i = 0; i < bs >> 2; i++)
3494 *((unsigned int *)pad + i) ^= OPAD_DATA;
3495
3496 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3497 goto out;
3498
3499 /* convert the ipad and opad digest to network order */
3500 chcr_change_order(actx->h_iopad, param.result_size);
3501 chcr_change_order(o_ptr, param.result_size);
3502 key_ctx_len = sizeof(struct _key_ctx) +
125d01ca 3503 roundup(keys.enckeylen, 16) +
3504 (param.result_size + align) * 2;
3505 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3506 0, 1, key_ctx_len >> 4);
3507 actx->auth_mode = param.auth_mode;
3508 chcr_free_shash(base_hash);
3509
3510 return 0;
3511 }
3512out:
3513 aeadctx->enckey_len = 0;
ec1bca94 3514 if (!IS_ERR(base_hash))
3515 chcr_free_shash(base_hash);
3516 return -EINVAL;
3517}
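/*
 * Illustrative sketch (not part of the driver): the loop above builds the
 * HMAC inner/outer pads the classic way - pad the key to the hash block size
 * and XOR it with repeated 0x36 (ipad) or 0x5c (opad) - word by word, using
 * IPAD_DATA/OPAD_DATA as 32-bit constants. The stand-alone snippet below
 * constructs both pads byte by byte, which is equivalent; it stops before the
 * partial hash, which would need a SHA implementation. The 64-byte block size
 * is an assumption matching SHA-1/SHA-224/SHA-256.
 */
#include <stdio.h>
#include <string.h>

#define HASH_BLOCK_SIZE 64	/* SHA-1/SHA-224/SHA-256 block size */

static void build_hmac_pads(const unsigned char *key, size_t keylen,
			    unsigned char *ipad, unsigned char *opad)
{
	size_t i;

	/* Keys longer than the block size must be digested first (omitted). */
	memset(ipad, 0, HASH_BLOCK_SIZE);
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, HASH_BLOCK_SIZE);

	for (i = 0; i < HASH_BLOCK_SIZE; i++) {
		ipad[i] ^= 0x36;	/* IPAD_DATA is 0x36 repeated */
		opad[i] ^= 0x5c;	/* OPAD_DATA is 0x5c repeated */
	}
}

int main(void)
{
	unsigned char ipad[HASH_BLOCK_SIZE], opad[HASH_BLOCK_SIZE];
	const unsigned char key[] = "example hmac key";	/* made-up key */

	build_hmac_pads(key, sizeof(key) - 1, ipad, opad);
	printf("ipad[0]=%02x opad[0]=%02x\n", ipad[0], opad[0]);
	return 0;
}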
3518
3519static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3520 const u8 *key, unsigned int keylen)
3521{
2f47d580 3522 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3523 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3524 struct crypto_authenc_keys keys;
0e93708d 3525 int err;
3526 /* the key blob carries both the authentication and the cipher key */
3d64bd67 3527 unsigned int subtype;
3528 int key_ctx_len = 0;
3529 unsigned char ck_size = 0;
3530
3531 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3532 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3533 & CRYPTO_TFM_REQ_MASK);
3534 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3535 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3536 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3537 & CRYPTO_TFM_RES_MASK);
3538 if (err)
3539 goto out;
3540
3541 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3542 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3543 goto out;
3544 }
3545 subtype = get_aead_subtype(authenc);
3546 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3547 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3548 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3549 goto out;
3550 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3551 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3552 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3553 }
3554 if (keys.enckeylen == AES_KEYSIZE_128) {
3555 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3556 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3557 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3558 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3559 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3560 } else {
3d64bd67 3561 pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
3562 goto out;
3563 }
3564 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3565 aeadctx->enckey_len = keys.enckeylen;
3566 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3567 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3568 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3569 aeadctx->enckey_len << 3);
3570 }
125d01ca 3571 key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3572
3573 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3574 0, key_ctx_len >> 4);
3575 actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3576 return 0;
3577out:
3578 aeadctx->enckey_len = 0;
3579 return -EINVAL;
3580}
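/*
 * Illustrative sketch (not part of the driver): crypto_authenc_extractkeys(),
 * used by both authenc setkey paths above, expects the key blob in the
 * authenc() format: a netlink-style rtattr carrying
 * struct crypto_authenc_key_param (big-endian enckeylen), followed by the
 * authentication key and then the cipher key. The stand-alone builder below
 * mirrors that layout; the rtattr values written by hand here (length 8,
 * type 1 for CRYPTO_AUTHENC_KEYA_PARAM) are an assumption of this sketch, not
 * a copy of the uapi headers.
 */
#include <stdint.h>
#include <string.h>

/* Build [rtattr][be32 enckeylen][authkey][enckey]; returns the total length. */
static size_t build_authenc_key_blob(uint8_t *out,
				     const uint8_t *authkey, size_t authkeylen,
				     const uint8_t *enckey, size_t enckeylen)
{
	size_t off = 0;
	uint16_t rta_len = 8;	/* header (4 bytes) + param (4 bytes) */
	uint16_t rta_type = 1;	/* assumed CRYPTO_AUTHENC_KEYA_PARAM */

	/* struct rtattr { u16 rta_len; u16 rta_type; } - host endian */
	memcpy(out + off, &rta_len, 2);  off += 2;
	memcpy(out + off, &rta_type, 2); off += 2;

	/* struct crypto_authenc_key_param { __be32 enckeylen; } */
	out[off++] = (uint8_t)(enckeylen >> 24);
	out[off++] = (uint8_t)(enckeylen >> 16);
	out[off++] = (uint8_t)(enckeylen >> 8);
	out[off++] = (uint8_t)enckeylen;

	memcpy(out + off, authkey, authkeylen); off += authkeylen;
	memcpy(out + off, enckey, enckeylen);   off += enckeylen;
	return off;
}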
3581
3582static int chcr_aead_op(struct aead_request *req,
3583 unsigned short op_type,
3584 int size,
3585 create_wr_t create_wr_fn)
3586{
3587 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3588 struct uld_ctx *u_ctx;
3589 struct sk_buff *skb;
3590
3591 if (!a_ctx(tfm)->dev) {
3592 pr_err("chcr : %s : No crypto device.\n", __func__);
3593 return -ENXIO;
3594 }
3595 u_ctx = ULD_CTX(a_ctx(tfm));
3596 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3597 a_ctx(tfm)->tx_qidx)) {
3598 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3599 return -EBUSY;
3600 }
3601
3602 /* Form a WR from req */
3603 skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
3604 op_type);
3605
3606 if (IS_ERR(skb) || !skb)
3607 return PTR_ERR(skb);
3608
3609 skb->dev = u_ctx->lldi.ports[0];
3610 set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3611 chcr_send_wr(skb);
3612 return -EINPROGRESS;
3613}
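/*
 * Illustrative sketch (kernel-context, not part of the driver):
 * chcr_aead_op() returns -EINPROGRESS on submission (or -EBUSY when the queue
 * is full and the request does not allow backlogging), and the result is
 * delivered later through the request callback. A caller that wants to wait
 * synchronously typically uses the crypto_wait_req() helpers, roughly as
 * below; the "gcm(aes)" name and the scatterlist setup are illustrative only.
 */
#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_gcm_encrypt(struct scatterlist *sg, unsigned int assoclen,
			       unsigned int cryptlen, u8 *iv,
			       const u8 *key, unsigned int keylen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				       CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);

	/* Waits for the async completion, including the -EBUSY backlog case. */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}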
3614
3615static int chcr_aead_encrypt(struct aead_request *req)
3616{
3617 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3618 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3619
3620 reqctx->verify = VERIFY_HW;
3621
3622 switch (get_aead_subtype(tfm)) {
3623 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3624 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3625 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3626 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3627 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3628 create_authenc_wr);
3629 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3630 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3631 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3632 create_aead_ccm_wr);
3633 default:
3634 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3635 create_gcm_wr);
3636 }
3637}
3638
3639static int chcr_aead_decrypt(struct aead_request *req)
3640{
3641 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2f47d580 3642 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3643 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3644 int size;
3645
3646 if (aeadctx->mayverify == VERIFY_SW) {
3647 size = crypto_aead_maxauthsize(tfm);
3648 reqctx->verify = VERIFY_SW;
3649 } else {
3650 size = 0;
3651 reqctx->verify = VERIFY_HW;
3652 }
3653
3654 switch (get_aead_subtype(tfm)) {
3655 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3656 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3657 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3658 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3659 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3660 create_authenc_wr);
3661 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3662 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3663 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3664 create_aead_ccm_wr);
3665 default:
3666 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3667 create_gcm_wr);
3668 }
3669}
3670
3671static struct chcr_alg_template driver_algs[] = {
3672 /* AES-CBC */
3673 {
b8fd1f41 3674 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3675 .is_registered = 0,
3676 .alg.crypto = {
3677 .cra_name = "cbc(aes)",
2debd332 3678 .cra_driver_name = "cbc-aes-chcr",
324429d7 3679 .cra_blocksize = AES_BLOCK_SIZE,
324429d7 3680 .cra_init = chcr_cra_init,
b8fd1f41 3681 .cra_exit = chcr_cra_exit,
3682 .cra_u.ablkcipher = {
3683 .min_keysize = AES_MIN_KEY_SIZE,
3684 .max_keysize = AES_MAX_KEY_SIZE,
3685 .ivsize = AES_BLOCK_SIZE,
3686 .setkey = chcr_aes_cbc_setkey,
3687 .encrypt = chcr_aes_encrypt,
3688 .decrypt = chcr_aes_decrypt,
3689 }
3690 }
3691 },
3692 {
b8fd1f41 3693 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3694 .is_registered = 0,
3695 .alg.crypto = {
3696 .cra_name = "xts(aes)",
2debd332 3697 .cra_driver_name = "xts-aes-chcr",
324429d7 3698 .cra_blocksize = AES_BLOCK_SIZE,
3699 .cra_init = chcr_cra_init,
3700 .cra_exit = NULL,
b8fd1f41 3701 .cra_u .ablkcipher = {
3702 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3703 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3704 .ivsize = AES_BLOCK_SIZE,
3705 .setkey = chcr_aes_xts_setkey,
3706 .encrypt = chcr_aes_encrypt,
3707 .decrypt = chcr_aes_decrypt,
3708 }
3709 }
3710 },
3711 {
3712 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3713 .is_registered = 0,
3714 .alg.crypto = {
3715 .cra_name = "ctr(aes)",
3716 .cra_driver_name = "ctr-aes-chcr",
3717 .cra_blocksize = 1,
3718 .cra_init = chcr_cra_init,
3719 .cra_exit = chcr_cra_exit,
3720 .cra_u.ablkcipher = {
3721 .min_keysize = AES_MIN_KEY_SIZE,
3722 .max_keysize = AES_MAX_KEY_SIZE,
3723 .ivsize = AES_BLOCK_SIZE,
3724 .setkey = chcr_aes_ctr_setkey,
3725 .encrypt = chcr_aes_encrypt,
3726 .decrypt = chcr_aes_decrypt,
3727 }
3728 }
3729 },
3730 {
3731 .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3732 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3733 .is_registered = 0,
3734 .alg.crypto = {
3735 .cra_name = "rfc3686(ctr(aes))",
3736 .cra_driver_name = "rfc3686-ctr-aes-chcr",
3737 .cra_blocksize = 1,
3738 .cra_init = chcr_rfc3686_init,
3739 .cra_exit = chcr_cra_exit,
3740 .cra_u.ablkcipher = {
3741 .min_keysize = AES_MIN_KEY_SIZE +
3742 CTR_RFC3686_NONCE_SIZE,
3743 .max_keysize = AES_MAX_KEY_SIZE +
3744 CTR_RFC3686_NONCE_SIZE,
3745 .ivsize = CTR_RFC3686_IV_SIZE,
3746 .setkey = chcr_aes_rfc3686_setkey,
3747 .encrypt = chcr_aes_encrypt,
3748 .decrypt = chcr_aes_decrypt,
3749 .geniv = "seqiv",
3750 }
3751 }
3752 },
3753 /* SHA */
3754 {
3755 .type = CRYPTO_ALG_TYPE_AHASH,
3756 .is_registered = 0,
3757 .alg.hash = {
3758 .halg.digestsize = SHA1_DIGEST_SIZE,
3759 .halg.base = {
3760 .cra_name = "sha1",
3761 .cra_driver_name = "sha1-chcr",
3762 .cra_blocksize = SHA1_BLOCK_SIZE,
3763 }
3764 }
3765 },
3766 {
3767 .type = CRYPTO_ALG_TYPE_AHASH,
3768 .is_registered = 0,
3769 .alg.hash = {
3770 .halg.digestsize = SHA256_DIGEST_SIZE,
3771 .halg.base = {
3772 .cra_name = "sha256",
3773 .cra_driver_name = "sha256-chcr",
3774 .cra_blocksize = SHA256_BLOCK_SIZE,
3775 }
3776 }
3777 },
3778 {
3779 .type = CRYPTO_ALG_TYPE_AHASH,
3780 .is_registered = 0,
3781 .alg.hash = {
3782 .halg.digestsize = SHA224_DIGEST_SIZE,
3783 .halg.base = {
3784 .cra_name = "sha224",
3785 .cra_driver_name = "sha224-chcr",
3786 .cra_blocksize = SHA224_BLOCK_SIZE,
3787 }
3788 }
3789 },
3790 {
3791 .type = CRYPTO_ALG_TYPE_AHASH,
3792 .is_registered = 0,
3793 .alg.hash = {
3794 .halg.digestsize = SHA384_DIGEST_SIZE,
3795 .halg.base = {
3796 .cra_name = "sha384",
3797 .cra_driver_name = "sha384-chcr",
3798 .cra_blocksize = SHA384_BLOCK_SIZE,
3799 }
3800 }
3801 },
3802 {
3803 .type = CRYPTO_ALG_TYPE_AHASH,
3804 .is_registered = 0,
3805 .alg.hash = {
3806 .halg.digestsize = SHA512_DIGEST_SIZE,
3807 .halg.base = {
3808 .cra_name = "sha512",
3809 .cra_driver_name = "sha512-chcr",
3810 .cra_blocksize = SHA512_BLOCK_SIZE,
3811 }
3812 }
3813 },
3814 /* HMAC */
3815 {
3816 .type = CRYPTO_ALG_TYPE_HMAC,
3817 .is_registered = 0,
3818 .alg.hash = {
3819 .halg.digestsize = SHA1_DIGEST_SIZE,
3820 .halg.base = {
3821 .cra_name = "hmac(sha1)",
2debd332 3822 .cra_driver_name = "hmac-sha1-chcr",
3823 .cra_blocksize = SHA1_BLOCK_SIZE,
3824 }
3825 }
3826 },
3827 {
3828 .type = CRYPTO_ALG_TYPE_HMAC,
3829 .is_registered = 0,
3830 .alg.hash = {
3831 .halg.digestsize = SHA224_DIGEST_SIZE,
3832 .halg.base = {
3833 .cra_name = "hmac(sha224)",
2debd332 3834 .cra_driver_name = "hmac-sha224-chcr",
3835 .cra_blocksize = SHA224_BLOCK_SIZE,
3836 }
3837 }
3838 },
3839 {
3840 .type = CRYPTO_ALG_TYPE_HMAC,
3841 .is_registered = 0,
3842 .alg.hash = {
3843 .halg.digestsize = SHA256_DIGEST_SIZE,
3844 .halg.base = {
3845 .cra_name = "hmac(sha256)",
2debd332 3846 .cra_driver_name = "hmac-sha256-chcr",
3847 .cra_blocksize = SHA256_BLOCK_SIZE,
3848 }
3849 }
3850 },
3851 {
3852 .type = CRYPTO_ALG_TYPE_HMAC,
3853 .is_registered = 0,
3854 .alg.hash = {
3855 .halg.digestsize = SHA384_DIGEST_SIZE,
3856 .halg.base = {
3857 .cra_name = "hmac(sha384)",
2debd332 3858 .cra_driver_name = "hmac-sha384-chcr",
3859 .cra_blocksize = SHA384_BLOCK_SIZE,
3860 }
3861 }
3862 },
3863 {
3864 .type = CRYPTO_ALG_TYPE_HMAC,
3865 .is_registered = 0,
3866 .alg.hash = {
3867 .halg.digestsize = SHA512_DIGEST_SIZE,
3868 .halg.base = {
3869 .cra_name = "hmac(sha512)",
2debd332 3870 .cra_driver_name = "hmac-sha512-chcr",
3871 .cra_blocksize = SHA512_BLOCK_SIZE,
3872 }
3873 }
3874 },
3875 /* Add AEAD Algorithms */
3876 {
3877 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3878 .is_registered = 0,
3879 .alg.aead = {
3880 .base = {
3881 .cra_name = "gcm(aes)",
3882 .cra_driver_name = "gcm-aes-chcr",
3883 .cra_blocksize = 1,
e29abda5 3884 .cra_priority = CHCR_AEAD_PRIORITY,
3885 .cra_ctxsize = sizeof(struct chcr_context) +
3886 sizeof(struct chcr_aead_ctx) +
3887 sizeof(struct chcr_gcm_ctx),
3888 },
8f6acb7f 3889 .ivsize = GCM_AES_IV_SIZE,
3890 .maxauthsize = GHASH_DIGEST_SIZE,
3891 .setkey = chcr_gcm_setkey,
3892 .setauthsize = chcr_gcm_setauthsize,
3893 }
3894 },
3895 {
3896 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3897 .is_registered = 0,
3898 .alg.aead = {
3899 .base = {
3900 .cra_name = "rfc4106(gcm(aes))",
3901 .cra_driver_name = "rfc4106-gcm-aes-chcr",
3902 .cra_blocksize = 1,
e29abda5 3903 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3904 .cra_ctxsize = sizeof(struct chcr_context) +
3905 sizeof(struct chcr_aead_ctx) +
3906 sizeof(struct chcr_gcm_ctx),
3907
3908 },
8f6acb7f 3909 .ivsize = GCM_RFC4106_IV_SIZE,
3910 .maxauthsize = GHASH_DIGEST_SIZE,
3911 .setkey = chcr_gcm_setkey,
3912 .setauthsize = chcr_4106_4309_setauthsize,
3913 }
3914 },
3915 {
3916 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3917 .is_registered = 0,
3918 .alg.aead = {
3919 .base = {
3920 .cra_name = "ccm(aes)",
3921 .cra_driver_name = "ccm-aes-chcr",
3922 .cra_blocksize = 1,
e29abda5 3923 .cra_priority = CHCR_AEAD_PRIORITY,
3924 .cra_ctxsize = sizeof(struct chcr_context) +
3925 sizeof(struct chcr_aead_ctx),
3926
3927 },
3928 .ivsize = AES_BLOCK_SIZE,
3929 .maxauthsize = GHASH_DIGEST_SIZE,
3930 .setkey = chcr_aead_ccm_setkey,
3931 .setauthsize = chcr_ccm_setauthsize,
3932 }
3933 },
3934 {
3935 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3936 .is_registered = 0,
3937 .alg.aead = {
3938 .base = {
3939 .cra_name = "rfc4309(ccm(aes))",
3940 .cra_driver_name = "rfc4309-ccm-aes-chcr",
3941 .cra_blocksize = 1,
e29abda5 3942 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3943 .cra_ctxsize = sizeof(struct chcr_context) +
3944 sizeof(struct chcr_aead_ctx),
3945
3946 },
3947 .ivsize = 8,
3948 .maxauthsize = GHASH_DIGEST_SIZE,
3949 .setkey = chcr_aead_rfc4309_setkey,
3950 .setauthsize = chcr_4106_4309_setauthsize,
3951 }
3952 },
3953 {
3d64bd67 3954 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3955 .is_registered = 0,
3956 .alg.aead = {
3957 .base = {
3958 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3959 .cra_driver_name =
3960 "authenc-hmac-sha1-cbc-aes-chcr",
3961 .cra_blocksize = AES_BLOCK_SIZE,
e29abda5 3962 .cra_priority = CHCR_AEAD_PRIORITY,
3963 .cra_ctxsize = sizeof(struct chcr_context) +
3964 sizeof(struct chcr_aead_ctx) +
3965 sizeof(struct chcr_authenc_ctx),
3966
3967 },
3968 .ivsize = AES_BLOCK_SIZE,
3969 .maxauthsize = SHA1_DIGEST_SIZE,
3970 .setkey = chcr_authenc_setkey,
3971 .setauthsize = chcr_authenc_setauthsize,
3972 }
3973 },
3974 {
3d64bd67 3975 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3976 .is_registered = 0,
3977 .alg.aead = {
3978 .base = {
3979
3980 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3981 .cra_driver_name =
3982 "authenc-hmac-sha256-cbc-aes-chcr",
3983 .cra_blocksize = AES_BLOCK_SIZE,
e29abda5 3984 .cra_priority = CHCR_AEAD_PRIORITY,
3985 .cra_ctxsize = sizeof(struct chcr_context) +
3986 sizeof(struct chcr_aead_ctx) +
3987 sizeof(struct chcr_authenc_ctx),
3988
3989 },
3990 .ivsize = AES_BLOCK_SIZE,
3991 .maxauthsize = SHA256_DIGEST_SIZE,
3992 .setkey = chcr_authenc_setkey,
3993 .setauthsize = chcr_authenc_setauthsize,
3994 }
3995 },
3996 {
3d64bd67 3997 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3998 .is_registered = 0,
3999 .alg.aead = {
4000 .base = {
4001 .cra_name = "authenc(hmac(sha224),cbc(aes))",
4002 .cra_driver_name =
4003 "authenc-hmac-sha224-cbc-aes-chcr",
4004 .cra_blocksize = AES_BLOCK_SIZE,
e29abda5 4005 .cra_priority = CHCR_AEAD_PRIORITY,
4006 .cra_ctxsize = sizeof(struct chcr_context) +
4007 sizeof(struct chcr_aead_ctx) +
4008 sizeof(struct chcr_authenc_ctx),
4009 },
4010 .ivsize = AES_BLOCK_SIZE,
4011 .maxauthsize = SHA224_DIGEST_SIZE,
4012 .setkey = chcr_authenc_setkey,
4013 .setauthsize = chcr_authenc_setauthsize,
4014 }
4015 },
4016 {
3d64bd67 4017 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4018 .is_registered = 0,
4019 .alg.aead = {
4020 .base = {
4021 .cra_name = "authenc(hmac(sha384),cbc(aes))",
4022 .cra_driver_name =
4023 "authenc-hmac-sha384-cbc-aes-chcr",
4024 .cra_blocksize = AES_BLOCK_SIZE,
e29abda5 4025 .cra_priority = CHCR_AEAD_PRIORITY,
4026 .cra_ctxsize = sizeof(struct chcr_context) +
4027 sizeof(struct chcr_aead_ctx) +
4028 sizeof(struct chcr_authenc_ctx),
4029
4030 },
4031 .ivsize = AES_BLOCK_SIZE,
4032 .maxauthsize = SHA384_DIGEST_SIZE,
4033 .setkey = chcr_authenc_setkey,
4034 .setauthsize = chcr_authenc_setauthsize,
4035 }
4036 },
4037 {
3d64bd67 4038 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4039 .is_registered = 0,
4040 .alg.aead = {
4041 .base = {
4042 .cra_name = "authenc(hmac(sha512),cbc(aes))",
4043 .cra_driver_name =
4044 "authenc-hmac-sha512-cbc-aes-chcr",
4045 .cra_blocksize = AES_BLOCK_SIZE,
e29abda5 4046 .cra_priority = CHCR_AEAD_PRIORITY,
4047 .cra_ctxsize = sizeof(struct chcr_context) +
4048 sizeof(struct chcr_aead_ctx) +
4049 sizeof(struct chcr_authenc_ctx),
4050
4051 },
4052 .ivsize = AES_BLOCK_SIZE,
4053 .maxauthsize = SHA512_DIGEST_SIZE,
4054 .setkey = chcr_authenc_setkey,
4055 .setauthsize = chcr_authenc_setauthsize,
4056 }
4057 },
4058 {
3d64bd67 4059 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4060 .is_registered = 0,
4061 .alg.aead = {
4062 .base = {
4063 .cra_name = "authenc(digest_null,cbc(aes))",
4064 .cra_driver_name =
4065 "authenc-digest_null-cbc-aes-chcr",
4066 .cra_blocksize = AES_BLOCK_SIZE,
e29abda5 4067 .cra_priority = CHCR_AEAD_PRIORITY,
4068 .cra_ctxsize = sizeof(struct chcr_context) +
4069 sizeof(struct chcr_aead_ctx) +
4070 sizeof(struct chcr_authenc_ctx),
4071
4072 },
4073 .ivsize = AES_BLOCK_SIZE,
4074 .maxauthsize = 0,
4075 .setkey = chcr_aead_digest_null_setkey,
4076 .setauthsize = chcr_authenc_null_setauthsize,
4077 }
4078 },
4079 {
4080 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4081 .is_registered = 0,
4082 .alg.aead = {
4083 .base = {
4084 .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4085 .cra_driver_name =
4086 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4087 .cra_blocksize = 1,
4088 .cra_priority = CHCR_AEAD_PRIORITY,
4089 .cra_ctxsize = sizeof(struct chcr_context) +
4090 sizeof(struct chcr_aead_ctx) +
4091 sizeof(struct chcr_authenc_ctx),
4092
4093 },
4094 .ivsize = CTR_RFC3686_IV_SIZE,
4095 .maxauthsize = SHA1_DIGEST_SIZE,
4096 .setkey = chcr_authenc_setkey,
4097 .setauthsize = chcr_authenc_setauthsize,
4098 }
4099 },
4100 {
4101 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4102 .is_registered = 0,
4103 .alg.aead = {
4104 .base = {
4105
4106 .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4107 .cra_driver_name =
4108 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4109 .cra_blocksize = 1,
4110 .cra_priority = CHCR_AEAD_PRIORITY,
4111 .cra_ctxsize = sizeof(struct chcr_context) +
4112 sizeof(struct chcr_aead_ctx) +
4113 sizeof(struct chcr_authenc_ctx),
4114
4115 },
4116 .ivsize = CTR_RFC3686_IV_SIZE,
4117 .maxauthsize = SHA256_DIGEST_SIZE,
4118 .setkey = chcr_authenc_setkey,
4119 .setauthsize = chcr_authenc_setauthsize,
4120 }
4121 },
4122 {
4123 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4124 .is_registered = 0,
4125 .alg.aead = {
4126 .base = {
4127 .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4128 .cra_driver_name =
4129 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4130 .cra_blocksize = 1,
4131 .cra_priority = CHCR_AEAD_PRIORITY,
4132 .cra_ctxsize = sizeof(struct chcr_context) +
4133 sizeof(struct chcr_aead_ctx) +
4134 sizeof(struct chcr_authenc_ctx),
4135 },
4136 .ivsize = CTR_RFC3686_IV_SIZE,
4137 .maxauthsize = SHA224_DIGEST_SIZE,
4138 .setkey = chcr_authenc_setkey,
4139 .setauthsize = chcr_authenc_setauthsize,
4140 }
4141 },
4142 {
4143 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4144 .is_registered = 0,
4145 .alg.aead = {
4146 .base = {
4147 .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4148 .cra_driver_name =
4149 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4150 .cra_blocksize = 1,
4151 .cra_priority = CHCR_AEAD_PRIORITY,
4152 .cra_ctxsize = sizeof(struct chcr_context) +
4153 sizeof(struct chcr_aead_ctx) +
4154 sizeof(struct chcr_authenc_ctx),
4155
4156 },
4157 .ivsize = CTR_RFC3686_IV_SIZE,
4158 .maxauthsize = SHA384_DIGEST_SIZE,
4159 .setkey = chcr_authenc_setkey,
4160 .setauthsize = chcr_authenc_setauthsize,
4161 }
4162 },
4163 {
4164 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4165 .is_registered = 0,
4166 .alg.aead = {
4167 .base = {
4168 .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4169 .cra_driver_name =
4170 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4171 .cra_blocksize = 1,
4172 .cra_priority = CHCR_AEAD_PRIORITY,
4173 .cra_ctxsize = sizeof(struct chcr_context) +
4174 sizeof(struct chcr_aead_ctx) +
4175 sizeof(struct chcr_authenc_ctx),
4176
4177 },
4178 .ivsize = CTR_RFC3686_IV_SIZE,
4179 .maxauthsize = SHA512_DIGEST_SIZE,
4180 .setkey = chcr_authenc_setkey,
4181 .setauthsize = chcr_authenc_setauthsize,
4182 }
4183 },
4184 {
4185 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4186 .is_registered = 0,
4187 .alg.aead = {
4188 .base = {
4189 .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4190 .cra_driver_name =
4191 "authenc-digest_null-rfc3686-ctr-aes-chcr",
4192 .cra_blocksize = 1,
4193 .cra_priority = CHCR_AEAD_PRIORITY,
4194 .cra_ctxsize = sizeof(struct chcr_context) +
4195 sizeof(struct chcr_aead_ctx) +
4196 sizeof(struct chcr_authenc_ctx),
4197
4198 },
4199 .ivsize = CTR_RFC3686_IV_SIZE,
4200 .maxauthsize = 0,
4201 .setkey = chcr_aead_digest_null_setkey,
4202 .setauthsize = chcr_authenc_null_setauthsize,
4203 }
4204 },
4205
4206};
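/*
 * Illustrative sketch (kernel-context, not part of the driver): every entry
 * in driver_algs[] advertises a generic cra_name ("gcm(aes)", "sha256", ...)
 * plus a chcr-specific cra_driver_name, and the cra_priority values let the
 * crypto framework prefer this hardware implementation when a caller asks for
 * the generic name. The snippet below only illustrates that lookup; the
 * pr_info() and the choice of "gcm(aes)" are illustrative.
 */
#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/printk.h>

static int example_lookup_gcm(void)
{
	struct crypto_aead *tfm;

	/* Picks the highest-priority registered "gcm(aes)" implementation. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Reports e.g. "gcm-aes-chcr" when the Chelsio driver won the lookup. */
	pr_info("gcm(aes) backed by %s\n",
		crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)));

	crypto_free_aead(tfm);
	return 0;
}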
4207
4208/*
4209 * chcr_unregister_alg - Deregister the driver's crypto algorithms from
4210 * the kernel crypto framework.
4211 */
4212static int chcr_unregister_alg(void)
4213{
4214 int i;
4215
4216 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4217 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4218 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4219 if (driver_algs[i].is_registered)
4220 crypto_unregister_alg(
4221 &driver_algs[i].alg.crypto);
4222 break;
4223 case CRYPTO_ALG_TYPE_AEAD:
4224 if (driver_algs[i].is_registered)
4225 crypto_unregister_aead(
4226 &driver_algs[i].alg.aead);
4227 break;
4228 case CRYPTO_ALG_TYPE_AHASH:
4229 if (driver_algs[i].is_registered)
4230 crypto_unregister_ahash(
4231 &driver_algs[i].alg.hash);
4232 break;
4233 }
4234 driver_algs[i].is_registered = 0;
4235 }
4236 return 0;
4237}
4238
4239#define SZ_AHASH_CTX sizeof(struct chcr_context)
4240#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4241#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4242#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
4243
4244/*
4245 * chcr_register_alg - Register the driver's crypto algorithms with the kernel crypto framework.
4246 */
4247static int chcr_register_alg(void)
4248{
4249 struct crypto_alg ai;
4250 struct ahash_alg *a_hash;
4251 int err = 0, i;
4252 char *name = NULL;
4253
4254 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4255 if (driver_algs[i].is_registered)
4256 continue;
4257 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4258 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4259 driver_algs[i].alg.crypto.cra_priority =
4260 CHCR_CRA_PRIORITY;
4261 driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
4262 driver_algs[i].alg.crypto.cra_flags =
4263 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4264 CRYPTO_ALG_NEED_FALLBACK;
4265 driver_algs[i].alg.crypto.cra_ctxsize =
4266 sizeof(struct chcr_context) +
4267 sizeof(struct ablk_ctx);
4268 driver_algs[i].alg.crypto.cra_alignmask = 0;
4269 driver_algs[i].alg.crypto.cra_type =
4270 &crypto_ablkcipher_type;
4271 err = crypto_register_alg(&driver_algs[i].alg.crypto);
4272 name = driver_algs[i].alg.crypto.cra_driver_name;
4273 break;
2debd332 4274 case CRYPTO_ALG_TYPE_AEAD:
2debd332 4275 driver_algs[i].alg.aead.base.cra_flags =
4276 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
4277 CRYPTO_ALG_NEED_FALLBACK;
4278 driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4279 driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4280 driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4281 driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4282 driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4283 err = crypto_register_aead(&driver_algs[i].alg.aead);
4284 name = driver_algs[i].alg.aead.base.cra_driver_name;
4285 break;
4286 case CRYPTO_ALG_TYPE_AHASH:
4287 a_hash = &driver_algs[i].alg.hash;
4288 a_hash->update = chcr_ahash_update;
4289 a_hash->final = chcr_ahash_final;
4290 a_hash->finup = chcr_ahash_finup;
4291 a_hash->digest = chcr_ahash_digest;
4292 a_hash->export = chcr_ahash_export;
4293 a_hash->import = chcr_ahash_import;
4294 a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4295 a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4296 a_hash->halg.base.cra_module = THIS_MODULE;
4297 a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
4298 a_hash->halg.base.cra_alignmask = 0;
4299 a_hash->halg.base.cra_exit = NULL;
4300 a_hash->halg.base.cra_type = &crypto_ahash_type;
4301
4302 if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4303 a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4304 a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4305 a_hash->init = chcr_hmac_init;
4306 a_hash->setkey = chcr_ahash_setkey;
4307 a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4308 } else {
4309 a_hash->init = chcr_sha_init;
4310 a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4311 a_hash->halg.base.cra_init = chcr_sha_cra_init;
4312 }
4313 err = crypto_register_ahash(&driver_algs[i].alg.hash);
4314 ai = driver_algs[i].alg.hash.halg.base;
4315 name = ai.cra_driver_name;
4316 break;
4317 }
4318 if (err) {
4319 pr_err("chcr : %s : Algorithm registration failed\n",
4320 name);
4321 goto register_err;
4322 } else {
4323 driver_algs[i].is_registered = 1;
4324 }
4325 }
4326 return 0;
4327
4328register_err:
4329 chcr_unregister_alg();
4330 return err;
4331}
4332
4333/*
4334 * start_crypto - Register the crypto algorithms.
4335 * This should be called once, when the first device comes up. After this
4336 * the kernel will start calling the driver APIs for crypto operations.
4337 */
4338int start_crypto(void)
4339{
4340 return chcr_register_alg();
4341}
4342
4343/*
4344 * stop_crypto - Deregister all the crypto algorithms from the kernel.
4345 * This should be called once, when the last device goes down. After this
4346 * the kernel will not call the driver APIs for crypto operations.
4347 */
4348int stop_crypto(void)
4349{
4350 chcr_unregister_alg();
4351 return 0;
4352}