]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - drivers/crypto/chelsio/chcr_algo.c
crypto: marvell - Add a NULL entry at the end of mv_cesa_plat_id_table[]
[mirror_ubuntu-hirsute-kernel.git] / drivers / crypto / chelsio / chcr_algo.c
CommitLineData
324429d7
HS
1/*
2 * This file is part of the Chelsio T6 Crypto driver for Linux.
3 *
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 * Written and Maintained by:
35 * Manoj Malviya (manojmalviya@chelsio.com)
36 * Atul Gupta (atul.gupta@chelsio.com)
37 * Jitendra Lulla (jlulla@chelsio.com)
38 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39 * Harsh Jain (harsh@chelsio.com)
40 */
41
42#define pr_fmt(fmt) "chcr:" fmt
43
44#include <linux/kernel.h>
45#include <linux/module.h>
46#include <linux/crypto.h>
47#include <linux/cryptohash.h>
48#include <linux/skbuff.h>
49#include <linux/rtnetlink.h>
50#include <linux/highmem.h>
51#include <linux/scatterlist.h>
52
53#include <crypto/aes.h>
54#include <crypto/algapi.h>
55#include <crypto/hash.h>
8f6acb7f 56#include <crypto/gcm.h>
324429d7 57#include <crypto/sha.h>
2debd332 58#include <crypto/authenc.h>
b8fd1f41
HJ
59#include <crypto/ctr.h>
60#include <crypto/gf128mul.h>
2debd332
HJ
61#include <crypto/internal/aead.h>
62#include <crypto/null.h>
63#include <crypto/internal/skcipher.h>
64#include <crypto/aead.h>
65#include <crypto/scatterwalk.h>
324429d7
HS
66#include <crypto/internal/hash.h>
67
68#include "t4fw_api.h"
69#include "t4_msg.h"
70#include "chcr_core.h"
71#include "chcr_algo.h"
72#include "chcr_crypto.h"
73
2f47d580
HJ
74#define IV AES_BLOCK_SIZE
75
2debd332
HJ
76static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
77{
78 return ctx->crypto_ctx->aeadctx;
79}
80
324429d7
HS
81static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
82{
83 return ctx->crypto_ctx->ablkctx;
84}
85
86static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
87{
88 return ctx->crypto_ctx->hmacctx;
89}
90
2debd332
HJ
91static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
92{
93 return gctx->ctx->gcm;
94}
95
96static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
97{
98 return gctx->ctx->authenc;
99}
100
324429d7
HS
101static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
102{
103 return ctx->dev->u_ctx;
104}
105
106static inline int is_ofld_imm(const struct sk_buff *skb)
107{
2f47d580 108 return (skb->len <= SGE_MAX_WR_LEN);
324429d7
HS
109}
110
111/*
112 * sgl_len - calculates the size of an SGL of the given capacity
113 * @n: the number of SGL entries
114 * Calculates the number of flits needed for a scatter/gather list that
115 * can hold the given number of entries.
116 */
117static inline unsigned int sgl_len(unsigned int n)
118{
119 n--;
120 return (3 * n) / 2 + (n & 1) + 2;
121}
122
2f47d580
HJ
123static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
124 unsigned int entlen,
125 unsigned int skip)
2956f36c
HJ
126{
127 int nents = 0;
128 unsigned int less;
2f47d580 129 unsigned int skip_len = 0;
2956f36c 130
2f47d580
HJ
131 while (sg && skip) {
132 if (sg_dma_len(sg) <= skip) {
133 skip -= sg_dma_len(sg);
134 skip_len = 0;
135 sg = sg_next(sg);
136 } else {
137 skip_len = skip;
138 skip = 0;
139 }
2956f36c
HJ
140 }
141
2f47d580
HJ
142 while (sg && reqlen) {
143 less = min(reqlen, sg_dma_len(sg) - skip_len);
144 nents += DIV_ROUND_UP(less, entlen);
145 reqlen -= less;
146 skip_len = 0;
147 sg = sg_next(sg);
148 }
2956f36c
HJ
149 return nents;
150}
151
2f47d580
HJ
152static inline void chcr_handle_ahash_resp(struct ahash_request *req,
153 unsigned char *input,
154 int err)
155{
156 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
157 int digestsize, updated_digestsize;
158 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
159 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
160
161 if (input == NULL)
162 goto out;
163 reqctx = ahash_request_ctx(req);
164 digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
165 if (reqctx->is_sg_map)
166 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
167 if (reqctx->dma_addr)
168 dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr,
169 reqctx->dma_len, DMA_TO_DEVICE);
170 reqctx->dma_addr = 0;
171 updated_digestsize = digestsize;
172 if (digestsize == SHA224_DIGEST_SIZE)
173 updated_digestsize = SHA256_DIGEST_SIZE;
174 else if (digestsize == SHA384_DIGEST_SIZE)
175 updated_digestsize = SHA512_DIGEST_SIZE;
176 if (reqctx->result == 1) {
177 reqctx->result = 0;
178 memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
179 digestsize);
180 } else {
181 memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
182 updated_digestsize);
183 }
184out:
185 req->base.complete(&req->base, err);
186
187 }
188
189static inline void chcr_handle_aead_resp(struct aead_request *req,
190 unsigned char *input,
191 int err)
192{
193 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
194 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
195 struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
196
197
198 chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
199 if (reqctx->b0_dma)
200 dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
201 reqctx->b0_len, DMA_BIDIRECTIONAL);
202 if (reqctx->verify == VERIFY_SW) {
203 chcr_verify_tag(req, input, &err);
204 reqctx->verify = VERIFY_HW;
205}
206 req->base.complete(&req->base, err);
207
208}
2debd332
HJ
209static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
210{
211 u8 temp[SHA512_DIGEST_SIZE];
212 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
213 int authsize = crypto_aead_authsize(tfm);
214 struct cpl_fw6_pld *fw6_pld;
215 int cmp = 0;
216
217 fw6_pld = (struct cpl_fw6_pld *)input;
218 if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
219 (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
d600fc8a 220 cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
2debd332
HJ
221 } else {
222
223 sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
224 authsize, req->assoclen +
225 req->cryptlen - authsize);
d600fc8a 226 cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
2debd332
HJ
227 }
228 if (cmp)
229 *err = -EBADMSG;
230 else
231 *err = 0;
232}
233
324429d7
HS
234/*
235 * chcr_handle_resp - Unmap the DMA buffers associated with the request
236 * @req: crypto request
237 */
238int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2debd332 239 int err)
324429d7
HS
240{
241 struct crypto_tfm *tfm = req->tfm;
242 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
ee0863ba 243 struct adapter *adap = padap(ctx->dev);
324429d7
HS
244
245 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2debd332 246 case CRYPTO_ALG_TYPE_AEAD:
2f47d580 247 chcr_handle_aead_resp(aead_request_cast(req), input, err);
2debd332
HJ
248 break;
249
44e9f799 250 case CRYPTO_ALG_TYPE_ABLKCIPHER:
b8fd1f41
HJ
251 err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
252 input, err);
324429d7
HS
253 break;
254
255 case CRYPTO_ALG_TYPE_AHASH:
2f47d580 256 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
324429d7 257 }
ee0863ba 258 atomic_inc(&adap->chcr_stats.complete);
2debd332 259 return err;
324429d7
HS
260}
261
2f47d580 262static void get_aes_decrypt_key(unsigned char *dec_key,
39f91a34
HJ
263 const unsigned char *key,
264 unsigned int keylength)
265{
266 u32 temp;
267 u32 w_ring[MAX_NK];
268 int i, j, k;
269 u8 nr, nk;
270
271 switch (keylength) {
272 case AES_KEYLENGTH_128BIT:
273 nk = KEYLENGTH_4BYTES;
274 nr = NUMBER_OF_ROUNDS_10;
275 break;
276 case AES_KEYLENGTH_192BIT:
277 nk = KEYLENGTH_6BYTES;
278 nr = NUMBER_OF_ROUNDS_12;
279 break;
280 case AES_KEYLENGTH_256BIT:
281 nk = KEYLENGTH_8BYTES;
282 nr = NUMBER_OF_ROUNDS_14;
283 break;
284 default:
285 return;
286 }
287 for (i = 0; i < nk; i++)
288 w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
289
290 i = 0;
291 temp = w_ring[nk - 1];
292 while (i + nk < (nr + 1) * 4) {
293 if (!(i % nk)) {
294 /* RotWord(temp) */
295 temp = (temp << 8) | (temp >> 24);
296 temp = aes_ks_subword(temp);
297 temp ^= round_constant[i / nk];
298 } else if (nk == 8 && (i % 4 == 0)) {
299 temp = aes_ks_subword(temp);
300 }
301 w_ring[i % nk] ^= temp;
302 temp = w_ring[i % nk];
303 i++;
304 }
305 i--;
306 for (k = 0, j = i % nk; k < nk; k++) {
307 *((u32 *)dec_key + k) = htonl(w_ring[j]);
308 j--;
309 if (j < 0)
310 j += nk;
311 }
312}
313
e7922729 314static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
324429d7 315{
ec1bca94 316 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
324429d7
HS
317
318 switch (ds) {
319 case SHA1_DIGEST_SIZE:
e7922729 320 base_hash = crypto_alloc_shash("sha1", 0, 0);
324429d7
HS
321 break;
322 case SHA224_DIGEST_SIZE:
e7922729 323 base_hash = crypto_alloc_shash("sha224", 0, 0);
324429d7
HS
324 break;
325 case SHA256_DIGEST_SIZE:
e7922729 326 base_hash = crypto_alloc_shash("sha256", 0, 0);
324429d7
HS
327 break;
328 case SHA384_DIGEST_SIZE:
e7922729 329 base_hash = crypto_alloc_shash("sha384", 0, 0);
324429d7
HS
330 break;
331 case SHA512_DIGEST_SIZE:
e7922729 332 base_hash = crypto_alloc_shash("sha512", 0, 0);
324429d7
HS
333 break;
334 }
324429d7 335
e7922729 336 return base_hash;
324429d7
HS
337}
338
339static int chcr_compute_partial_hash(struct shash_desc *desc,
340 char *iopad, char *result_hash,
341 int digest_size)
342{
343 struct sha1_state sha1_st;
344 struct sha256_state sha256_st;
345 struct sha512_state sha512_st;
346 int error;
347
348 if (digest_size == SHA1_DIGEST_SIZE) {
349 error = crypto_shash_init(desc) ?:
350 crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
351 crypto_shash_export(desc, (void *)&sha1_st);
352 memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
353 } else if (digest_size == SHA224_DIGEST_SIZE) {
354 error = crypto_shash_init(desc) ?:
355 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
356 crypto_shash_export(desc, (void *)&sha256_st);
357 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
358
359 } else if (digest_size == SHA256_DIGEST_SIZE) {
360 error = crypto_shash_init(desc) ?:
361 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
362 crypto_shash_export(desc, (void *)&sha256_st);
363 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
364
365 } else if (digest_size == SHA384_DIGEST_SIZE) {
366 error = crypto_shash_init(desc) ?:
367 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
368 crypto_shash_export(desc, (void *)&sha512_st);
369 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
370
371 } else if (digest_size == SHA512_DIGEST_SIZE) {
372 error = crypto_shash_init(desc) ?:
373 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
374 crypto_shash_export(desc, (void *)&sha512_st);
375 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
376 } else {
377 error = -EINVAL;
378 pr_err("Unknown digest size %d\n", digest_size);
379 }
380 return error;
381}
382
383static void chcr_change_order(char *buf, int ds)
384{
385 int i;
386
387 if (ds == SHA512_DIGEST_SIZE) {
388 for (i = 0; i < (ds / sizeof(u64)); i++)
389 *((__be64 *)buf + i) =
390 cpu_to_be64(*((u64 *)buf + i));
391 } else {
392 for (i = 0; i < (ds / sizeof(u32)); i++)
393 *((__be32 *)buf + i) =
394 cpu_to_be32(*((u32 *)buf + i));
395 }
396}
397
398static inline int is_hmac(struct crypto_tfm *tfm)
399{
400 struct crypto_alg *alg = tfm->__crt_alg;
401 struct chcr_alg_template *chcr_crypto_alg =
402 container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
403 alg.hash);
5c86a8ff 404 if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
324429d7
HS
405 return 1;
406 return 0;
407}
408
2f47d580
HJ
409static inline void dsgl_walk_init(struct dsgl_walk *walk,
410 struct cpl_rx_phys_dsgl *dsgl)
324429d7 411{
2f47d580
HJ
412 walk->dsgl = dsgl;
413 walk->nents = 0;
414 walk->to = (struct phys_sge_pairs *)(dsgl + 1);
415}
416
417static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
418{
419 struct cpl_rx_phys_dsgl *phys_cpl;
420
421 phys_cpl = walk->dsgl;
324429d7
HS
422
423 phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
424 | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
2f47d580
HJ
425 phys_cpl->pcirlxorder_to_noofsgentr =
426 htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
427 CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
428 CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
429 CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
430 CPL_RX_PHYS_DSGL_DCAID_V(0) |
431 CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
432 phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
433 phys_cpl->rss_hdr_int.qid = htons(qid);
434 phys_cpl->rss_hdr_int.hash_val = 0;
435}
436
437static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
438 size_t size,
439 dma_addr_t *addr)
440{
441 int j;
442
443 if (!size)
444 return;
445 j = walk->nents;
446 walk->to->len[j % 8] = htons(size);
447 walk->to->addr[j % 8] = cpu_to_be64(*addr);
448 j++;
449 if ((j % 8) == 0)
450 walk->to++;
451 walk->nents = j;
452}
453
454static void dsgl_walk_add_sg(struct dsgl_walk *walk,
455 struct scatterlist *sg,
456 unsigned int slen,
457 unsigned int skip)
458{
459 int skip_len = 0;
460 unsigned int left_size = slen, len = 0;
461 unsigned int j = walk->nents;
462 int offset, ent_len;
463
464 if (!slen)
465 return;
466 while (sg && skip) {
467 if (sg_dma_len(sg) <= skip) {
468 skip -= sg_dma_len(sg);
469 skip_len = 0;
470 sg = sg_next(sg);
471 } else {
472 skip_len = skip;
473 skip = 0;
474 }
475 }
476
2956f36c 477 while (left_size && sg) {
2f47d580 478 len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
2956f36c
HJ
479 offset = 0;
480 while (len) {
2f47d580
HJ
481 ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
482 walk->to->len[j % 8] = htons(ent_len);
483 walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
484 offset + skip_len);
2956f36c
HJ
485 offset += ent_len;
486 len -= ent_len;
487 j++;
488 if ((j % 8) == 0)
2f47d580 489 walk->to++;
2956f36c 490 }
2f47d580
HJ
491 walk->last_sg = sg;
492 walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
493 skip_len) + skip_len;
494 left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
495 skip_len = 0;
2956f36c
HJ
496 sg = sg_next(sg);
497 }
2f47d580
HJ
498 walk->nents = j;
499}
500
501static inline void ulptx_walk_init(struct ulptx_walk *walk,
502 struct ulptx_sgl *ulp)
503{
504 walk->sgl = ulp;
505 walk->nents = 0;
506 walk->pair_idx = 0;
507 walk->pair = ulp->sge;
508 walk->last_sg = NULL;
509 walk->last_sg_len = 0;
510}
511
512static inline void ulptx_walk_end(struct ulptx_walk *walk)
513{
514 walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
515 ULPTX_NSGE_V(walk->nents));
516}
2956f36c 517
2f47d580
HJ
518
519static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
520 size_t size,
521 dma_addr_t *addr)
522{
523 if (!size)
524 return;
525
526 if (walk->nents == 0) {
527 walk->sgl->len0 = cpu_to_be32(size);
528 walk->sgl->addr0 = cpu_to_be64(*addr);
529 } else {
530 walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
531 walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
532 walk->pair_idx = !walk->pair_idx;
533 if (!walk->pair_idx)
534 walk->pair++;
535 }
536 walk->nents++;
324429d7
HS
537}
538
2f47d580 539static void ulptx_walk_add_sg(struct ulptx_walk *walk,
adf1ca61 540 struct scatterlist *sg,
2f47d580
HJ
541 unsigned int len,
542 unsigned int skip)
324429d7 543{
2f47d580
HJ
544 int small;
545 int skip_len = 0;
546 unsigned int sgmin;
324429d7 547
2f47d580
HJ
548 if (!len)
549 return;
550
551 while (sg && skip) {
552 if (sg_dma_len(sg) <= skip) {
553 skip -= sg_dma_len(sg);
554 skip_len = 0;
555 sg = sg_next(sg);
556 } else {
557 skip_len = skip;
558 skip = 0;
559 }
560 }
561 if (walk->nents == 0) {
562 small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
563 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
564 walk->sgl->len0 = cpu_to_be32(sgmin);
565 walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
566 walk->nents++;
567 len -= sgmin;
568 walk->last_sg = sg;
569 walk->last_sg_len = sgmin + skip_len;
570 skip_len += sgmin;
571 if (sg_dma_len(sg) == skip_len) {
572 sg = sg_next(sg);
573 skip_len = 0;
574 }
575 }
576
577 while (sg && len) {
578 small = min(sg_dma_len(sg) - skip_len, len);
579 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
580 walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
581 walk->pair->addr[walk->pair_idx] =
582 cpu_to_be64(sg_dma_address(sg) + skip_len);
583 walk->pair_idx = !walk->pair_idx;
584 walk->nents++;
585 if (!walk->pair_idx)
586 walk->pair++;
587 len -= sgmin;
588 skip_len += sgmin;
589 walk->last_sg = sg;
590 walk->last_sg_len = skip_len;
591 if (sg_dma_len(sg) == skip_len) {
592 sg = sg_next(sg);
593 skip_len = 0;
594 }
324429d7 595 }
324429d7
HS
596}
597
2debd332
HJ
598static inline int get_aead_subtype(struct crypto_aead *aead)
599{
600 struct aead_alg *alg = crypto_aead_alg(aead);
601 struct chcr_alg_template *chcr_crypto_alg =
602 container_of(alg, struct chcr_alg_template, alg.aead);
603 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
604}
605
324429d7
HS
606static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
607{
608 struct crypto_alg *alg = tfm->__crt_alg;
609 struct chcr_alg_template *chcr_crypto_alg =
610 container_of(alg, struct chcr_alg_template, alg.crypto);
611
612 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
613}
614
b8fd1f41
HJ
615static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
616{
617 struct adapter *adap = netdev2adap(dev);
618 struct sge_uld_txq_info *txq_info =
619 adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
620 struct sge_uld_txq *txq;
621 int ret = 0;
622
623 local_bh_disable();
624 txq = &txq_info->uldtxq[idx];
625 spin_lock(&txq->sendq.lock);
626 if (txq->full)
627 ret = -1;
628 spin_unlock(&txq->sendq.lock);
629 local_bh_enable();
630 return ret;
631}
632
324429d7
HS
633static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
634 struct _key_ctx *key_ctx)
635{
636 if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
cc1b156d 637 memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
324429d7
HS
638 } else {
639 memcpy(key_ctx->key,
640 ablkctx->key + (ablkctx->enckey_len >> 1),
641 ablkctx->enckey_len >> 1);
cc1b156d
HJ
642 memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
643 ablkctx->rrkey, ablkctx->enckey_len >> 1);
324429d7
HS
644 }
645 return 0;
646}
b8fd1f41
HJ
647static int chcr_sg_ent_in_wr(struct scatterlist *src,
648 struct scatterlist *dst,
649 unsigned int minsg,
2f47d580
HJ
650 unsigned int space,
651 unsigned int srcskip,
652 unsigned int dstskip)
b8fd1f41
HJ
653{
654 int srclen = 0, dstlen = 0;
2f47d580 655 int srcsg = minsg, dstsg = minsg;
2956f36c 656 int offset = 0, less;
b8fd1f41 657
2f47d580
HJ
658 if (sg_dma_len(src) == srcskip) {
659 src = sg_next(src);
660 srcskip = 0;
661 }
662
663 if (sg_dma_len(dst) == dstskip) {
664 dst = sg_next(dst);
665 dstskip = 0;
666 }
667
668 while (src && dst &&
b8fd1f41 669 space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
2f47d580 670 srclen += (sg_dma_len(src) - srcskip);
b8fd1f41 671 srcsg++;
2956f36c 672 offset = 0;
b8fd1f41
HJ
673 while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
674 space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
675 if (srclen <= dstlen)
676 break;
2f47d580
HJ
677 less = min_t(unsigned int, sg_dma_len(dst) - offset -
678 dstskip, CHCR_DST_SG_SIZE);
2956f36c
HJ
679 dstlen += less;
680 offset += less;
2f47d580 681 if (offset == sg_dma_len(dst)) {
2956f36c
HJ
682 dst = sg_next(dst);
683 offset = 0;
684 }
b8fd1f41 685 dstsg++;
2f47d580 686 dstskip = 0;
b8fd1f41
HJ
687 }
688 src = sg_next(src);
2f47d580 689 srcskip = 0;
b8fd1f41 690 }
b8fd1f41
HJ
691 return min(srclen, dstlen);
692}
693
694static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
695 u32 flags,
696 struct scatterlist *src,
697 struct scatterlist *dst,
698 unsigned int nbytes,
699 u8 *iv,
700 unsigned short op_type)
701{
702 int err;
703
704 SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
705 skcipher_request_set_tfm(subreq, cipher);
706 skcipher_request_set_callback(subreq, flags, NULL, NULL);
707 skcipher_request_set_crypt(subreq, src, dst,
708 nbytes, iv);
709
710 err = op_type ? crypto_skcipher_decrypt(subreq) :
711 crypto_skcipher_encrypt(subreq);
712 skcipher_request_zero(subreq);
713
714 return err;
324429d7 715
b8fd1f41 716}
324429d7 717static inline void create_wreq(struct chcr_context *ctx,
358961d1 718 struct chcr_wr *chcr_req,
2f47d580
HJ
719 struct crypto_async_request *req,
720 unsigned int imm,
570265bf 721 int hash_sz,
2f47d580 722 unsigned int len16,
2512a624
HJ
723 unsigned int sc_len,
724 unsigned int lcb)
324429d7
HS
725{
726 struct uld_ctx *u_ctx = ULD_CTX(ctx);
72a56ca9 727 int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
324429d7 728
324429d7 729
570265bf 730 chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
358961d1 731 chcr_req->wreq.pld_size_hash_size =
570265bf 732 htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
358961d1 733 chcr_req->wreq.len16_pkd =
2f47d580 734 htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
358961d1
HJ
735 chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
736 chcr_req->wreq.rx_chid_to_rx_q_id =
8a13449f 737 FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
570265bf 738 !!lcb, ctx->tx_qidx);
324429d7 739
8a13449f
HJ
740 chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
741 qid);
2f47d580
HJ
742 chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
743 ((sizeof(chcr_req->wreq)) >> 4)));
324429d7 744
2f47d580 745 chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
358961d1 746 chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
2f47d580 747 sizeof(chcr_req->key_ctx) + sc_len);
324429d7
HS
748}
749
750/**
751 * create_cipher_wr - form the WR for cipher operations
752 * @req: cipher req.
753 * @ctx: crypto driver context of the request.
754 * @qid: ingress qid where response of this WR should be received.
755 * @op_type: encryption or decryption
756 */
b8fd1f41 757static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
324429d7 758{
b8fd1f41 759 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
2f47d580 760 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
324429d7 761 struct sk_buff *skb = NULL;
358961d1 762 struct chcr_wr *chcr_req;
324429d7 763 struct cpl_rx_phys_dsgl *phys_cpl;
2f47d580 764 struct ulptx_sgl *ulptx;
b8fd1f41
HJ
765 struct chcr_blkcipher_req_ctx *reqctx =
766 ablkcipher_request_ctx(wrparam->req);
2f47d580 767 unsigned int temp = 0, transhdr_len, dst_size;
b8fd1f41 768 int error;
2956f36c 769 int nents;
2f47d580 770 unsigned int kctx_len;
b8fd1f41
HJ
771 gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
772 GFP_KERNEL : GFP_ATOMIC;
2f47d580 773 struct adapter *adap = padap(c_ctx(tfm)->dev);
324429d7 774
2f47d580
HJ
775 nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
776 reqctx->dst_ofst);
777 dst_size = get_space_for_phys_dsgl(nents + 1);
358961d1 778 kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
2f47d580
HJ
779 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
780 nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
781 CHCR_SRC_SG_SIZE, reqctx->src_ofst);
782 temp = reqctx->imm ? (DIV_ROUND_UP((IV + wrparam->req->nbytes), 16)
783 * 16) : (sgl_len(nents + MIN_CIPHER_SG) * 8);
784 transhdr_len += temp;
785 transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
786 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
b8fd1f41
HJ
787 if (!skb) {
788 error = -ENOMEM;
789 goto err;
790 }
de77b966 791 chcr_req = __skb_put_zero(skb, transhdr_len);
358961d1 792 chcr_req->sec_cpl.op_ivinsrtofst =
2f47d580 793 FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
358961d1 794
2f47d580 795 chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
358961d1 796 chcr_req->sec_cpl.aadstart_cipherstop_hi =
2f47d580 797 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
358961d1
HJ
798
799 chcr_req->sec_cpl.cipherstop_lo_authinsert =
800 FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
b8fd1f41 801 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
324429d7 802 ablkctx->ciph_mode,
2f47d580 803 0, 0, IV >> 1);
358961d1 804 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
2f47d580 805 0, 0, dst_size);
324429d7 806
358961d1 807 chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
b8fd1f41
HJ
808 if ((reqctx->op == CHCR_DECRYPT_OP) &&
809 (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
810 CRYPTO_ALG_SUB_TYPE_CTR)) &&
811 (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
812 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
358961d1 813 generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
324429d7 814 } else {
b8fd1f41
HJ
815 if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
816 (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
358961d1
HJ
817 memcpy(chcr_req->key_ctx.key, ablkctx->key,
818 ablkctx->enckey_len);
324429d7 819 } else {
358961d1 820 memcpy(chcr_req->key_ctx.key, ablkctx->key +
324429d7
HS
821 (ablkctx->enckey_len >> 1),
822 ablkctx->enckey_len >> 1);
358961d1 823 memcpy(chcr_req->key_ctx.key +
324429d7
HS
824 (ablkctx->enckey_len >> 1),
825 ablkctx->key,
826 ablkctx->enckey_len >> 1);
827 }
828 }
358961d1 829 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2f47d580
HJ
830 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
831 chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
832 chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
324429d7 833
ee0863ba 834 atomic_inc(&adap->chcr_stats.cipher_rqst);
2f47d580
HJ
835 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len
836 +(reqctx->imm ? (IV + wrparam->bytes) : 0);
837 create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
838 transhdr_len, temp,
2512a624 839 ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
5c86a8ff 840 reqctx->skb = skb;
324429d7 841 return skb;
b8fd1f41
HJ
842err:
843 return ERR_PTR(error);
844}
845
846static inline int chcr_keyctx_ck_size(unsigned int keylen)
847{
848 int ck_size = 0;
849
850 if (keylen == AES_KEYSIZE_128)
851 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
852 else if (keylen == AES_KEYSIZE_192)
853 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
854 else if (keylen == AES_KEYSIZE_256)
855 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
856 else
857 ck_size = 0;
858
859 return ck_size;
860}
861static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
862 const u8 *key,
863 unsigned int keylen)
864{
865 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
2f47d580 866 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
b8fd1f41
HJ
867 int err = 0;
868
869 crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
870 crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
871 CRYPTO_TFM_REQ_MASK);
872 err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
873 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
874 tfm->crt_flags |=
875 crypto_skcipher_get_flags(ablkctx->sw_cipher) &
876 CRYPTO_TFM_RES_MASK;
877 return err;
324429d7
HS
878}
879
b8fd1f41
HJ
880static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
881 const u8 *key,
324429d7
HS
882 unsigned int keylen)
883{
2f47d580 884 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
324429d7
HS
885 unsigned int ck_size, context_size;
886 u16 alignment = 0;
b8fd1f41 887 int err;
324429d7 888
b8fd1f41
HJ
889 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
890 if (err)
324429d7 891 goto badkey_err;
b8fd1f41
HJ
892
893 ck_size = chcr_keyctx_ck_size(keylen);
894 alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
cc1b156d
HJ
895 memcpy(ablkctx->key, key, keylen);
896 ablkctx->enckey_len = keylen;
897 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
324429d7
HS
898 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
899 keylen + alignment) >> 4;
900
901 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
902 0, 0, context_size);
903 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
904 return 0;
905badkey_err:
b8fd1f41 906 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
324429d7 907 ablkctx->enckey_len = 0;
b8fd1f41
HJ
908
909 return err;
324429d7
HS
910}
911
b8fd1f41
HJ
912static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
913 const u8 *key,
914 unsigned int keylen)
324429d7 915{
2f47d580 916 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
b8fd1f41
HJ
917 unsigned int ck_size, context_size;
918 u16 alignment = 0;
919 int err;
920
921 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
922 if (err)
923 goto badkey_err;
924 ck_size = chcr_keyctx_ck_size(keylen);
925 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
926 memcpy(ablkctx->key, key, keylen);
927 ablkctx->enckey_len = keylen;
928 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
929 keylen + alignment) >> 4;
930
931 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
932 0, 0, context_size);
933 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
934
935 return 0;
936badkey_err:
937 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
938 ablkctx->enckey_len = 0;
939
940 return err;
941}
942
943static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
944 const u8 *key,
945 unsigned int keylen)
946{
2f47d580 947 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
b8fd1f41
HJ
948 unsigned int ck_size, context_size;
949 u16 alignment = 0;
950 int err;
951
952 if (keylen < CTR_RFC3686_NONCE_SIZE)
953 return -EINVAL;
954 memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
955 CTR_RFC3686_NONCE_SIZE);
956
957 keylen -= CTR_RFC3686_NONCE_SIZE;
958 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
959 if (err)
960 goto badkey_err;
961
962 ck_size = chcr_keyctx_ck_size(keylen);
963 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
964 memcpy(ablkctx->key, key, keylen);
965 ablkctx->enckey_len = keylen;
966 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
967 keylen + alignment) >> 4;
968
969 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
970 0, 0, context_size);
971 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
972
973 return 0;
974badkey_err:
975 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
976 ablkctx->enckey_len = 0;
977
978 return err;
979}
980static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
981{
982 unsigned int size = AES_BLOCK_SIZE;
983 __be32 *b = (__be32 *)(dstiv + size);
984 u32 c, prev;
985
986 memcpy(dstiv, srciv, AES_BLOCK_SIZE);
987 for (; size >= 4; size -= 4) {
988 prev = be32_to_cpu(*--b);
989 c = prev + add;
990 *b = cpu_to_be32(c);
991 if (prev < c)
992 break;
993 add = 1;
994 }
995
996}
997
998static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
999{
1000 __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1001 u64 c;
1002 u32 temp = be32_to_cpu(*--b);
1003
1004 temp = ~temp;
1005 c = (u64)temp + 1; // No of block can processed withou overflow
1006 if ((bytes / AES_BLOCK_SIZE) > c)
1007 bytes = c * AES_BLOCK_SIZE;
1008 return bytes;
1009}
1010
1011static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
1012{
1013 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
2f47d580 1014 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
b8fd1f41
HJ
1015 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1016 struct crypto_cipher *cipher;
1017 int ret, i;
1018 u8 *key;
1019 unsigned int keylen;
de1a00ac
HJ
1020 int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1021 int round8 = round / 8;
b8fd1f41 1022
d3f1d2f7 1023 cipher = ablkctx->aes_generic;
de1a00ac 1024 memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
b8fd1f41 1025
b8fd1f41
HJ
1026 keylen = ablkctx->enckey_len / 2;
1027 key = ablkctx->key + keylen;
1028 ret = crypto_cipher_setkey(cipher, key, keylen);
1029 if (ret)
d3f1d2f7 1030 goto out;
2f47d580 1031 /*H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0*/
de1a00ac
HJ
1032 for (i = 0; i < round8; i++)
1033 gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1034
1035 for (i = 0; i < (round % 8); i++)
b8fd1f41
HJ
1036 gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1037
1038 crypto_cipher_decrypt_one(cipher, iv, iv);
b8fd1f41
HJ
1039out:
1040 return ret;
1041}
1042
1043static int chcr_update_cipher_iv(struct ablkcipher_request *req,
1044 struct cpl_fw6_pld *fw6_pld, u8 *iv)
1045{
1046 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1047 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1048 int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
ab677ff4 1049 int ret = 0;
324429d7 1050
b8fd1f41
HJ
1051 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1052 ctr_add_iv(iv, req->info, (reqctx->processed /
1053 AES_BLOCK_SIZE));
1054 else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1055 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1056 CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1057 AES_BLOCK_SIZE) + 1);
1058 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1059 ret = chcr_update_tweak(req, iv);
1060 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1061 if (reqctx->op)
1062 sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
1063 16,
1064 reqctx->processed - AES_BLOCK_SIZE);
1065 else
1066 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1067 }
1068
324429d7 1069 return ret;
b8fd1f41 1070
324429d7
HS
1071}
1072
b8fd1f41
HJ
1073/* We need separate function for final iv because in rfc3686 Initial counter
1074 * starts from 1 and buffer size of iv is 8 byte only which remains constant
1075 * for subsequent update requests
1076 */
1077
1078static int chcr_final_cipher_iv(struct ablkcipher_request *req,
1079 struct cpl_fw6_pld *fw6_pld, u8 *iv)
1080{
1081 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1082 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1083 int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1084 int ret = 0;
1085
1086 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1087 ctr_add_iv(iv, req->info, (reqctx->processed /
1088 AES_BLOCK_SIZE));
1089 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1090 ret = chcr_update_tweak(req, iv);
1091 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1092 if (reqctx->op)
1093 sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
1094 16,
1095 reqctx->processed - AES_BLOCK_SIZE);
1096 else
1097 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1098
1099 }
1100 return ret;
1101
1102}
1103
1104
1105static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
1106 unsigned char *input, int err)
324429d7
HS
1107{
1108 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
2f47d580
HJ
1109 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1110 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
324429d7 1111 struct sk_buff *skb;
b8fd1f41
HJ
1112 struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1113 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1114 struct cipher_wr_param wrparam;
1115 int bytes;
1116
b8fd1f41 1117 if (err)
2f47d580 1118 goto unmap;
b8fd1f41 1119 if (req->nbytes == reqctx->processed) {
2f47d580
HJ
1120 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1121 req);
b8fd1f41
HJ
1122 err = chcr_final_cipher_iv(req, fw6_pld, req->info);
1123 goto complete;
1124 }
1125
1126 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
2f47d580 1127 c_ctx(tfm)->tx_qidx))) {
b8fd1f41
HJ
1128 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1129 err = -EBUSY;
2f47d580 1130 goto unmap;
b8fd1f41
HJ
1131 }
1132
1133 }
2f47d580
HJ
1134 if (!reqctx->imm) {
1135 bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
1136 SPACE_LEFT(ablkctx->enckey_len),
1137 reqctx->src_ofst, reqctx->dst_ofst);
b8fd1f41
HJ
1138 if ((bytes + reqctx->processed) >= req->nbytes)
1139 bytes = req->nbytes - reqctx->processed;
1140 else
1141 bytes = ROUND_16(bytes);
2f47d580
HJ
1142 } else {
1143 /*CTR mode counter overfloa*/
1144 bytes = req->nbytes - reqctx->processed;
1145 }
1146 dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1147 reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
b8fd1f41 1148 err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
2f47d580
HJ
1149 dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1150 reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
b8fd1f41 1151 if (err)
2f47d580 1152 goto unmap;
b8fd1f41
HJ
1153
1154 if (unlikely(bytes == 0)) {
2f47d580
HJ
1155 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1156 req);
b8fd1f41
HJ
1157 err = chcr_cipher_fallback(ablkctx->sw_cipher,
1158 req->base.flags,
2f47d580
HJ
1159 req->src,
1160 req->dst,
1161 req->nbytes,
1162 req->info,
b8fd1f41
HJ
1163 reqctx->op);
1164 goto complete;
1165 }
1166
1167 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1168 CRYPTO_ALG_SUB_TYPE_CTR)
1169 bytes = adjust_ctr_overflow(reqctx->iv, bytes);
2f47d580 1170 wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
b8fd1f41
HJ
1171 wrparam.req = req;
1172 wrparam.bytes = bytes;
1173 skb = create_cipher_wr(&wrparam);
1174 if (IS_ERR(skb)) {
1175 pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
1176 err = PTR_ERR(skb);
2f47d580 1177 goto unmap;
b8fd1f41
HJ
1178 }
1179 skb->dev = u_ctx->lldi.ports[0];
2f47d580 1180 set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
b8fd1f41 1181 chcr_send_wr(skb);
2f47d580
HJ
1182 reqctx->last_req_len = bytes;
1183 reqctx->processed += bytes;
b8fd1f41 1184 return 0;
2f47d580
HJ
1185unmap:
1186 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
b8fd1f41
HJ
1187complete:
1188 req->base.complete(&req->base, err);
1189 return err;
1190}
1191
1192static int process_cipher(struct ablkcipher_request *req,
1193 unsigned short qid,
1194 struct sk_buff **skb,
1195 unsigned short op_type)
1196{
1197 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1198 unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
1199 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2f47d580 1200 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
b8fd1f41 1201 struct cipher_wr_param wrparam;
2956f36c 1202 int bytes, err = -EINVAL;
b8fd1f41 1203
b8fd1f41
HJ
1204 reqctx->processed = 0;
1205 if (!req->info)
1206 goto error;
1207 if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1208 (req->nbytes == 0) ||
1209 (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
1210 pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1211 ablkctx->enckey_len, req->nbytes, ivsize);
1212 goto error;
1213 }
2f47d580
HJ
1214 chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1215 if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1216 AES_MIN_KEY_SIZE +
1217 sizeof(struct cpl_rx_phys_dsgl) +
1218 /*Min dsgl size*/
1219 32))) {
1220 /* Can be sent as Imm*/
1221 unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1222
1223 dnents = sg_nents_xlen(req->dst, req->nbytes,
1224 CHCR_DST_SG_SIZE, 0);
1225 dnents += 1; // IV
1226 phys_dsgl = get_space_for_phys_dsgl(dnents);
1227 kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
1228 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1229 reqctx->imm = (transhdr_len + IV + req->nbytes) <=
1230 SGE_MAX_WR_LEN;
1231 bytes = IV + req->nbytes;
1232
1233 } else {
1234 reqctx->imm = 0;
1235 }
1236
1237 if (!reqctx->imm) {
1238 bytes = chcr_sg_ent_in_wr(req->src, req->dst,
1239 MIN_CIPHER_SG,
1240 SPACE_LEFT(ablkctx->enckey_len),
1241 0, 0);
b8fd1f41
HJ
1242 if ((bytes + reqctx->processed) >= req->nbytes)
1243 bytes = req->nbytes - reqctx->processed;
1244 else
1245 bytes = ROUND_16(bytes);
2f47d580 1246 } else {
b8fd1f41 1247 bytes = req->nbytes;
2f47d580 1248 }
b8fd1f41
HJ
1249 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1250 CRYPTO_ALG_SUB_TYPE_CTR) {
1251 bytes = adjust_ctr_overflow(req->info, bytes);
1252 }
1253 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1254 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1255 memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1256 memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
1257 CTR_RFC3686_IV_SIZE);
1258
1259 /* initialize counter portion of counter block */
1260 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1261 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1262
1263 } else {
1264
2f47d580 1265 memcpy(reqctx->iv, req->info, IV);
b8fd1f41
HJ
1266 }
1267 if (unlikely(bytes == 0)) {
2f47d580
HJ
1268 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1269 req);
b8fd1f41
HJ
1270 err = chcr_cipher_fallback(ablkctx->sw_cipher,
1271 req->base.flags,
1272 req->src,
1273 req->dst,
1274 req->nbytes,
1275 req->info,
1276 op_type);
1277 goto error;
1278 }
b8fd1f41 1279 reqctx->op = op_type;
2f47d580
HJ
1280 reqctx->srcsg = req->src;
1281 reqctx->dstsg = req->dst;
1282 reqctx->src_ofst = 0;
1283 reqctx->dst_ofst = 0;
b8fd1f41
HJ
1284 wrparam.qid = qid;
1285 wrparam.req = req;
1286 wrparam.bytes = bytes;
1287 *skb = create_cipher_wr(&wrparam);
1288 if (IS_ERR(*skb)) {
1289 err = PTR_ERR(*skb);
2f47d580 1290 goto unmap;
b8fd1f41 1291 }
2f47d580
HJ
1292 reqctx->processed = bytes;
1293 reqctx->last_req_len = bytes;
b8fd1f41
HJ
1294
1295 return 0;
2f47d580
HJ
1296unmap:
1297 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
b8fd1f41
HJ
1298error:
1299 return err;
1300}
1301
1302static int chcr_aes_encrypt(struct ablkcipher_request *req)
1303{
1304 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
b8fd1f41
HJ
1305 struct sk_buff *skb = NULL;
1306 int err;
2f47d580 1307 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
324429d7
HS
1308
1309 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
2f47d580 1310 c_ctx(tfm)->tx_qidx))) {
324429d7
HS
1311 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1312 return -EBUSY;
1313 }
1314
2f47d580
HJ
1315 err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1316 &skb, CHCR_ENCRYPT_OP);
b8fd1f41
HJ
1317 if (err || !skb)
1318 return err;
324429d7 1319 skb->dev = u_ctx->lldi.ports[0];
2f47d580 1320 set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
324429d7
HS
1321 chcr_send_wr(skb);
1322 return -EINPROGRESS;
1323}
1324
1325static int chcr_aes_decrypt(struct ablkcipher_request *req)
1326{
1327 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
2f47d580 1328 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
b8fd1f41
HJ
1329 struct sk_buff *skb = NULL;
1330 int err;
324429d7
HS
1331
1332 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
2f47d580 1333 c_ctx(tfm)->tx_qidx))) {
324429d7
HS
1334 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1335 return -EBUSY;
1336 }
1337
2f47d580
HJ
1338 err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1339 &skb, CHCR_DECRYPT_OP);
b8fd1f41
HJ
1340 if (err || !skb)
1341 return err;
324429d7 1342 skb->dev = u_ctx->lldi.ports[0];
2f47d580 1343 set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
324429d7
HS
1344 chcr_send_wr(skb);
1345 return -EINPROGRESS;
1346}
1347
1348static int chcr_device_init(struct chcr_context *ctx)
1349{
14c19b17 1350 struct uld_ctx *u_ctx = NULL;
72a56ca9 1351 struct adapter *adap;
324429d7 1352 unsigned int id;
72a56ca9 1353 int txq_perchan, txq_idx, ntxq;
324429d7
HS
1354 int err = 0, rxq_perchan, rxq_idx;
1355
1356 id = smp_processor_id();
1357 if (!ctx->dev) {
14c19b17
HJ
1358 u_ctx = assign_chcr_device();
1359 if (!u_ctx) {
324429d7
HS
1360 pr_err("chcr device assignment fails\n");
1361 goto out;
1362 }
14c19b17 1363 ctx->dev = u_ctx->dev;
72a56ca9
HJ
1364 adap = padap(ctx->dev);
1365 ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
1366 adap->vres.ncrypto_fc);
324429d7 1367 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
72a56ca9 1368 txq_perchan = ntxq / u_ctx->lldi.nchan;
324429d7
HS
1369 rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
1370 rxq_idx += id % rxq_perchan;
72a56ca9
HJ
1371 txq_idx = ctx->dev->tx_channel_id * txq_perchan;
1372 txq_idx += id % txq_perchan;
324429d7 1373 spin_lock(&ctx->dev->lock_chcr_dev);
72a56ca9
HJ
1374 ctx->rx_qidx = rxq_idx;
1375 ctx->tx_qidx = txq_idx;
ab677ff4 1376 ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
8a13449f 1377 ctx->dev->rx_channel_id = 0;
324429d7
HS
1378 spin_unlock(&ctx->dev->lock_chcr_dev);
1379 }
1380out:
1381 return err;
1382}
1383
1384static int chcr_cra_init(struct crypto_tfm *tfm)
1385{
b8fd1f41
HJ
1386 struct crypto_alg *alg = tfm->__crt_alg;
1387 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1388 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1389
1390 ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
1391 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1392 if (IS_ERR(ablkctx->sw_cipher)) {
1393 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1394 return PTR_ERR(ablkctx->sw_cipher);
1395 }
d3f1d2f7
HJ
1396
1397 if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
1398 /* To update tweak*/
1399 ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
1400 if (IS_ERR(ablkctx->aes_generic)) {
1401 pr_err("failed to allocate aes cipher for tweak\n");
1402 return PTR_ERR(ablkctx->aes_generic);
1403 }
1404 } else
1405 ablkctx->aes_generic = NULL;
1406
324429d7
HS
1407 tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1408 return chcr_device_init(crypto_tfm_ctx(tfm));
1409}
1410
b8fd1f41
HJ
1411static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1412{
1413 struct crypto_alg *alg = tfm->__crt_alg;
1414 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1415 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1416
1417 /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
1418 * cannot be used as fallback in chcr_handle_cipher_response
1419 */
1420 ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1421 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1422 if (IS_ERR(ablkctx->sw_cipher)) {
1423 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1424 return PTR_ERR(ablkctx->sw_cipher);
1425 }
1426 tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1427 return chcr_device_init(crypto_tfm_ctx(tfm));
1428}
1429
1430
1431static void chcr_cra_exit(struct crypto_tfm *tfm)
1432{
1433 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1434 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1435
1436 crypto_free_skcipher(ablkctx->sw_cipher);
d3f1d2f7
HJ
1437 if (ablkctx->aes_generic)
1438 crypto_free_cipher(ablkctx->aes_generic);
b8fd1f41
HJ
1439}
1440
324429d7
HS
1441static int get_alg_config(struct algo_param *params,
1442 unsigned int auth_size)
1443{
1444 switch (auth_size) {
1445 case SHA1_DIGEST_SIZE:
1446 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1447 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1448 params->result_size = SHA1_DIGEST_SIZE;
1449 break;
1450 case SHA224_DIGEST_SIZE:
1451 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1452 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1453 params->result_size = SHA256_DIGEST_SIZE;
1454 break;
1455 case SHA256_DIGEST_SIZE:
1456 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1457 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1458 params->result_size = SHA256_DIGEST_SIZE;
1459 break;
1460 case SHA384_DIGEST_SIZE:
1461 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1462 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1463 params->result_size = SHA512_DIGEST_SIZE;
1464 break;
1465 case SHA512_DIGEST_SIZE:
1466 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1467 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1468 params->result_size = SHA512_DIGEST_SIZE;
1469 break;
1470 default:
1471 pr_err("chcr : ERROR, unsupported digest size\n");
1472 return -EINVAL;
1473 }
1474 return 0;
1475}
1476
e7922729 1477static inline void chcr_free_shash(struct crypto_shash *base_hash)
324429d7 1478{
e7922729 1479 crypto_free_shash(base_hash);
324429d7
HS
1480}
1481
1482/**
358961d1 1483 * create_hash_wr - Create hash work request
324429d7
HS
1484 * @req - Cipher req base
1485 */
358961d1 1486static struct sk_buff *create_hash_wr(struct ahash_request *req,
2debd332 1487 struct hash_wr_param *param)
324429d7
HS
1488{
1489 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1490 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2f47d580 1491 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
324429d7 1492 struct sk_buff *skb = NULL;
2f47d580 1493 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
358961d1 1494 struct chcr_wr *chcr_req;
2f47d580
HJ
1495 struct ulptx_sgl *ulptx;
1496 unsigned int nents = 0, transhdr_len, iopad_alignment = 0;
324429d7 1497 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2f47d580 1498 unsigned int kctx_len = 0, temp = 0;
324429d7 1499 u8 hash_size_in_response = 0;
358961d1
HJ
1500 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1501 GFP_ATOMIC;
2f47d580
HJ
1502 struct adapter *adap = padap(h_ctx(tfm)->dev);
1503 int error = 0;
324429d7
HS
1504
1505 iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
358961d1 1506 kctx_len = param->alg_prm.result_size + iopad_alignment;
324429d7
HS
1507 if (param->opad_needed)
1508 kctx_len += param->alg_prm.result_size + iopad_alignment;
1509
1510 if (req_ctx->result)
1511 hash_size_in_response = digestsize;
1512 else
1513 hash_size_in_response = param->alg_prm.result_size;
1514 transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
2f47d580
HJ
1515 req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <=
1516 SGE_MAX_WR_LEN;
1517 nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0);
1518 nents += param->bfr_len ? 1 : 0;
1519 transhdr_len += req_ctx->imm ? (DIV_ROUND_UP((param->bfr_len +
1520 param->sg_len), 16) * 16) :
1521 (sgl_len(nents) * 8);
1522 transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
1523
1524 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
324429d7 1525 if (!skb)
2f47d580 1526 return ERR_PTR(-ENOMEM);
de77b966 1527 chcr_req = __skb_put_zero(skb, transhdr_len);
324429d7 1528
358961d1 1529 chcr_req->sec_cpl.op_ivinsrtofst =
2f47d580 1530 FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
358961d1 1531 chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
324429d7 1532
358961d1 1533 chcr_req->sec_cpl.aadstart_cipherstop_hi =
324429d7 1534 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
358961d1 1535 chcr_req->sec_cpl.cipherstop_lo_authinsert =
324429d7 1536 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
358961d1 1537 chcr_req->sec_cpl.seqno_numivs =
324429d7 1538 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
358961d1 1539 param->opad_needed, 0);
324429d7 1540
358961d1 1541 chcr_req->sec_cpl.ivgen_hdrlen =
324429d7
HS
1542 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1543
358961d1
HJ
1544 memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1545 param->alg_prm.result_size);
324429d7
HS
1546
1547 if (param->opad_needed)
358961d1
HJ
1548 memcpy(chcr_req->key_ctx.key +
1549 ((param->alg_prm.result_size <= 32) ? 32 :
1550 CHCR_HASH_MAX_DIGEST_SIZE),
324429d7
HS
1551 hmacctx->opad, param->alg_prm.result_size);
1552
358961d1 1553 chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
324429d7
HS
1554 param->alg_prm.mk_size, 0,
1555 param->opad_needed,
358961d1
HJ
1556 ((kctx_len +
1557 sizeof(chcr_req->key_ctx)) >> 4));
1558 chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
2f47d580
HJ
1559 ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len +
1560 DUMMY_BYTES);
1561 if (param->bfr_len != 0) {
1562 req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev,
1563 req_ctx->reqbfr, param->bfr_len,
1564 DMA_TO_DEVICE);
1565 if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1566 req_ctx->dma_addr)) {
1567 error = -ENOMEM;
1568 goto err;
1569 }
1570 req_ctx->dma_len = param->bfr_len;
1571 } else {
1572 req_ctx->dma_addr = 0;
1573 }
1574 chcr_add_hash_src_ent(req, ulptx, param);
1575 /* Request upto max wr size */
1576 temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len
1577 + param->bfr_len) : 0);
ee0863ba 1578 atomic_inc(&adap->chcr_stats.digest_rqst);
2f47d580
HJ
1579 create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm,
1580 hash_size_in_response, transhdr_len,
1581 temp, 0);
324429d7 1582 req_ctx->skb = skb;
324429d7 1583 return skb;
2f47d580
HJ
1584err:
1585 kfree_skb(skb);
1586 return ERR_PTR(error);
324429d7
HS
1587}
1588
1589static int chcr_ahash_update(struct ahash_request *req)
1590{
1591 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1592 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
324429d7
HS
1593 struct uld_ctx *u_ctx = NULL;
1594 struct sk_buff *skb;
1595 u8 remainder = 0, bs;
1596 unsigned int nbytes = req->nbytes;
1597 struct hash_wr_param params;
2f47d580 1598 int error;
324429d7
HS
1599
1600 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1601
2f47d580 1602 u_ctx = ULD_CTX(h_ctx(rtfm));
324429d7 1603 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
2f47d580 1604 h_ctx(rtfm)->tx_qidx))) {
324429d7
HS
1605 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1606 return -EBUSY;
1607 }
1608
44fce12a
HJ
1609 if (nbytes + req_ctx->reqlen >= bs) {
1610 remainder = (nbytes + req_ctx->reqlen) % bs;
1611 nbytes = nbytes + req_ctx->reqlen - remainder;
324429d7 1612 } else {
44fce12a
HJ
1613 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1614 + req_ctx->reqlen, nbytes, 0);
1615 req_ctx->reqlen += nbytes;
324429d7
HS
1616 return 0;
1617 }
2f47d580
HJ
1618 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1619 if (error)
1620 return -ENOMEM;
324429d7
HS
1621 params.opad_needed = 0;
1622 params.more = 1;
1623 params.last = 0;
44fce12a
HJ
1624 params.sg_len = nbytes - req_ctx->reqlen;
1625 params.bfr_len = req_ctx->reqlen;
324429d7
HS
1626 params.scmd1 = 0;
1627 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1628 req_ctx->result = 0;
1629 req_ctx->data_len += params.sg_len + params.bfr_len;
358961d1 1630 skb = create_hash_wr(req, &params);
2f47d580
HJ
1631 if (IS_ERR(skb)) {
1632 error = PTR_ERR(skb);
1633 goto unmap;
1634 }
324429d7 1635
44fce12a
HJ
1636 if (remainder) {
1637 u8 *temp;
1638 /* Swap buffers */
1639 temp = req_ctx->reqbfr;
1640 req_ctx->reqbfr = req_ctx->skbfr;
1641 req_ctx->skbfr = temp;
324429d7 1642 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
44fce12a 1643 req_ctx->reqbfr, remainder, req->nbytes -
324429d7 1644 remainder);
44fce12a
HJ
1645 }
1646 req_ctx->reqlen = remainder;
324429d7 1647 skb->dev = u_ctx->lldi.ports[0];
2f47d580 1648 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
324429d7
HS
1649 chcr_send_wr(skb);
1650
1651 return -EINPROGRESS;
2f47d580
HJ
1652unmap:
1653 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1654 return error;
324429d7
HS
1655}
1656
1657static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1658{
1659 memset(bfr_ptr, 0, bs);
1660 *bfr_ptr = 0x80;
1661 if (bs == 64)
1662 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1663 else
1664 *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1665}
1666
1667static int chcr_ahash_final(struct ahash_request *req)
1668{
1669 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1670 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
324429d7
HS
1671 struct hash_wr_param params;
1672 struct sk_buff *skb;
1673 struct uld_ctx *u_ctx = NULL;
1674 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1675
2f47d580 1676 u_ctx = ULD_CTX(h_ctx(rtfm));
324429d7
HS
1677 if (is_hmac(crypto_ahash_tfm(rtfm)))
1678 params.opad_needed = 1;
1679 else
1680 params.opad_needed = 0;
1681 params.sg_len = 0;
1682 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1683 req_ctx->result = 1;
44fce12a 1684 params.bfr_len = req_ctx->reqlen;
324429d7 1685 req_ctx->data_len += params.bfr_len + params.sg_len;
44fce12a
HJ
1686 if (req_ctx->reqlen == 0) {
1687 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
324429d7
HS
1688 params.last = 0;
1689 params.more = 1;
1690 params.scmd1 = 0;
1691 params.bfr_len = bs;
1692
1693 } else {
1694 params.scmd1 = req_ctx->data_len;
1695 params.last = 1;
1696 params.more = 0;
1697 }
358961d1 1698 skb = create_hash_wr(req, &params);
40cdbe1a
YG
1699 if (IS_ERR(skb))
1700 return PTR_ERR(skb);
358961d1 1701
324429d7 1702 skb->dev = u_ctx->lldi.ports[0];
2f47d580 1703 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
324429d7
HS
1704 chcr_send_wr(skb);
1705 return -EINPROGRESS;
1706}
1707
1708static int chcr_ahash_finup(struct ahash_request *req)
1709{
1710 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1711 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
324429d7
HS
1712 struct uld_ctx *u_ctx = NULL;
1713 struct sk_buff *skb;
1714 struct hash_wr_param params;
1715 u8 bs;
2f47d580 1716 int error;
324429d7
HS
1717
1718 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2f47d580 1719 u_ctx = ULD_CTX(h_ctx(rtfm));
324429d7
HS
1720
1721 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
2f47d580 1722 h_ctx(rtfm)->tx_qidx))) {
324429d7
HS
1723 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1724 return -EBUSY;
1725 }
1726
1727 if (is_hmac(crypto_ahash_tfm(rtfm)))
1728 params.opad_needed = 1;
1729 else
1730 params.opad_needed = 0;
1731
1732 params.sg_len = req->nbytes;
44fce12a 1733 params.bfr_len = req_ctx->reqlen;
324429d7
HS
1734 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1735 req_ctx->data_len += params.bfr_len + params.sg_len;
1736 req_ctx->result = 1;
44fce12a
HJ
1737 if ((req_ctx->reqlen + req->nbytes) == 0) {
1738 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
324429d7
HS
1739 params.last = 0;
1740 params.more = 1;
1741 params.scmd1 = 0;
1742 params.bfr_len = bs;
1743 } else {
1744 params.scmd1 = req_ctx->data_len;
1745 params.last = 1;
1746 params.more = 0;
1747 }
2f47d580
HJ
1748 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1749 if (error)
1750 return -ENOMEM;
324429d7 1751
358961d1 1752 skb = create_hash_wr(req, &params);
2f47d580
HJ
1753 if (IS_ERR(skb)) {
1754 error = PTR_ERR(skb);
1755 goto unmap;
1756 }
324429d7 1757 skb->dev = u_ctx->lldi.ports[0];
2f47d580 1758 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
324429d7
HS
1759 chcr_send_wr(skb);
1760
1761 return -EINPROGRESS;
2f47d580
HJ
1762unmap:
1763 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1764 return error;
324429d7
HS
1765}
1766
1767static int chcr_ahash_digest(struct ahash_request *req)
1768{
1769 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1770 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
324429d7
HS
1771 struct uld_ctx *u_ctx = NULL;
1772 struct sk_buff *skb;
1773 struct hash_wr_param params;
1774 u8 bs;
2f47d580 1775 int error;
324429d7
HS
1776
1777 rtfm->init(req);
1778 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1779
2f47d580 1780 u_ctx = ULD_CTX(h_ctx(rtfm));
324429d7 1781 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
2f47d580 1782 h_ctx(rtfm)->tx_qidx))) {
324429d7
HS
1783 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1784 return -EBUSY;
1785 }
1786
1787 if (is_hmac(crypto_ahash_tfm(rtfm)))
1788 params.opad_needed = 1;
1789 else
1790 params.opad_needed = 0;
2f47d580
HJ
1791 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1792 if (error)
1793 return -ENOMEM;
324429d7
HS
1794
1795 params.last = 0;
1796 params.more = 0;
1797 params.sg_len = req->nbytes;
1798 params.bfr_len = 0;
1799 params.scmd1 = 0;
1800 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1801 req_ctx->result = 1;
1802 req_ctx->data_len += params.bfr_len + params.sg_len;
1803
44fce12a
HJ
1804 if (req->nbytes == 0) {
1805 create_last_hash_block(req_ctx->reqbfr, bs, 0);
324429d7
HS
1806 params.more = 1;
1807 params.bfr_len = bs;
1808 }
1809
358961d1 1810 skb = create_hash_wr(req, &params);
2f47d580
HJ
1811 if (IS_ERR(skb)) {
1812 error = PTR_ERR(skb);
1813 goto unmap;
1814 }
324429d7 1815 skb->dev = u_ctx->lldi.ports[0];
2f47d580 1816 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
324429d7
HS
1817 chcr_send_wr(skb);
1818 return -EINPROGRESS;
2f47d580
HJ
1819unmap:
1820 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1821 return error;
324429d7
HS
1822}
1823
1824static int chcr_ahash_export(struct ahash_request *areq, void *out)
1825{
1826 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1827 struct chcr_ahash_req_ctx *state = out;
1828
44fce12a 1829 state->reqlen = req_ctx->reqlen;
324429d7 1830 state->data_len = req_ctx->data_len;
2f47d580
HJ
1831 state->is_sg_map = 0;
1832 state->result = 0;
44fce12a 1833 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
324429d7
HS
1834 memcpy(state->partial_hash, req_ctx->partial_hash,
1835 CHCR_HASH_MAX_DIGEST_SIZE);
44fce12a 1836 return 0;
324429d7
HS
1837}
1838
1839static int chcr_ahash_import(struct ahash_request *areq, const void *in)
1840{
1841 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1842 struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
1843
44fce12a 1844 req_ctx->reqlen = state->reqlen;
324429d7 1845 req_ctx->data_len = state->data_len;
44fce12a
HJ
1846 req_ctx->reqbfr = req_ctx->bfr1;
1847 req_ctx->skbfr = req_ctx->bfr2;
2f47d580
HJ
1848 req_ctx->is_sg_map = 0;
1849 req_ctx->result = 0;
44fce12a 1850 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
324429d7
HS
1851 memcpy(req_ctx->partial_hash, state->partial_hash,
1852 CHCR_HASH_MAX_DIGEST_SIZE);
1853 return 0;
1854}
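For context, chcr_ahash_export()/chcr_ahash_import() back the generic ahash export/import hooks, so partially hashed state (pending buffer plus partial digest) can be snapshotted and resumed later. A hedged usage sketch through the generic kernel API; completion handling for the asynchronous calls is omitted and req/req2/tfm are assumed to be set up elsewhere:

	u8 *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);

	crypto_ahash_update(req);		/* hash some data first */
	crypto_ahash_export(req, state);	/* snapshot reqlen/data_len/partial_hash */
	/* ... later, possibly on another request bound to the same tfm ... */
	crypto_ahash_import(req2, state);	/* resume from the snapshot */
	crypto_ahash_finup(req2);		/* hash remaining data and finalize */
	kfree(state);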
1855
1856static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1857 unsigned int keylen)
1858{
2f47d580 1859 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
324429d7
HS
1860 unsigned int digestsize = crypto_ahash_digestsize(tfm);
1861 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1862 unsigned int i, err = 0, updated_digestsize;
1863
e7922729
HJ
1864 SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
1865
 1866 /* use the key to calculate the ipad and opad. ipad will be sent with the
324429d7
HS
 1867 * first request's data. opad will be sent with the final hash result.
 1868 * ipad is stored at hmacctx->ipad and opad at hmacctx->opad.
1869 */
e7922729
HJ
1870 shash->tfm = hmacctx->base_hash;
1871 shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
324429d7 1872 if (keylen > bs) {
e7922729 1873 err = crypto_shash_digest(shash, key, keylen,
324429d7
HS
1874 hmacctx->ipad);
1875 if (err)
1876 goto out;
1877 keylen = digestsize;
1878 } else {
1879 memcpy(hmacctx->ipad, key, keylen);
1880 }
1881 memset(hmacctx->ipad + keylen, 0, bs - keylen);
1882 memcpy(hmacctx->opad, hmacctx->ipad, bs);
1883
1884 for (i = 0; i < bs / sizeof(int); i++) {
1885 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
1886 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
1887 }
1888
1889 updated_digestsize = digestsize;
1890 if (digestsize == SHA224_DIGEST_SIZE)
1891 updated_digestsize = SHA256_DIGEST_SIZE;
1892 else if (digestsize == SHA384_DIGEST_SIZE)
1893 updated_digestsize = SHA512_DIGEST_SIZE;
e7922729 1894 err = chcr_compute_partial_hash(shash, hmacctx->ipad,
324429d7
HS
1895 hmacctx->ipad, digestsize);
1896 if (err)
1897 goto out;
1898 chcr_change_order(hmacctx->ipad, updated_digestsize);
1899
e7922729 1900 err = chcr_compute_partial_hash(shash, hmacctx->opad,
324429d7
HS
1901 hmacctx->opad, digestsize);
1902 if (err)
1903 goto out;
1904 chcr_change_order(hmacctx->opad, updated_digestsize);
1905out:
1906 return err;
1907}
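This is the standard RFC 2104 HMAC key schedule: keys longer than the block size are first digested, the result is zero-padded to one block, and that block is XORed with the 0x36/0x5c constants before a single compression pass produces the resumable ipad/opad states the hardware continues from. A standalone sketch of the pad derivation (assumes keylen has already been reduced to at most bs; not driver code):

#include <stdint.h>
#include <string.h>

static void hmac_pads(uint8_t *ipad, uint8_t *opad,
		      const uint8_t *key, size_t keylen, size_t bs)
{
	size_t i;

	memset(ipad, 0, bs);
	memcpy(ipad, key, keylen);	/* key, zero-padded to a full block */
	memcpy(opad, ipad, bs);
	for (i = 0; i < bs; i++) {
		ipad[i] ^= 0x36;	/* IPAD_DATA, byte-wise */
		opad[i] ^= 0x5c;	/* OPAD_DATA, byte-wise */
	}
}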
1908
b8fd1f41 1909static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
324429d7
HS
1910 unsigned int key_len)
1911{
2f47d580 1912 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
324429d7 1913 unsigned short context_size = 0;
b8fd1f41 1914 int err;
324429d7 1915
b8fd1f41
HJ
1916 err = chcr_cipher_fallback_setkey(cipher, key, key_len);
1917 if (err)
1918 goto badkey_err;
cc1b156d
HJ
1919
1920 memcpy(ablkctx->key, key, key_len);
1921 ablkctx->enckey_len = key_len;
1922 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
1923 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
1924 ablkctx->key_ctx_hdr =
1925 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
1926 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
1927 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
1928 CHCR_KEYCTX_NO_KEY, 1,
1929 0, context_size);
1930 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
1931 return 0;
b8fd1f41
HJ
1932badkey_err:
1933 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1934 ablkctx->enckey_len = 0;
1935
1936 return err;
324429d7
HS
1937}
1938
1939static int chcr_sha_init(struct ahash_request *areq)
1940{
1941 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1942 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1943 int digestsize = crypto_ahash_digestsize(tfm);
1944
1945 req_ctx->data_len = 0;
44fce12a
HJ
1946 req_ctx->reqlen = 0;
1947 req_ctx->reqbfr = req_ctx->bfr1;
1948 req_ctx->skbfr = req_ctx->bfr2;
324429d7
HS
1949 req_ctx->skb = NULL;
1950 req_ctx->result = 0;
2f47d580 1951 req_ctx->is_sg_map = 0;
324429d7
HS
1952 copy_hash_init_values(req_ctx->partial_hash, digestsize);
1953 return 0;
1954}
1955
1956static int chcr_sha_cra_init(struct crypto_tfm *tfm)
1957{
1958 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1959 sizeof(struct chcr_ahash_req_ctx));
1960 return chcr_device_init(crypto_tfm_ctx(tfm));
1961}
1962
1963static int chcr_hmac_init(struct ahash_request *areq)
1964{
1965 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1966 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2f47d580 1967 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
324429d7
HS
1968 unsigned int digestsize = crypto_ahash_digestsize(rtfm);
1969 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1970
1971 chcr_sha_init(areq);
1972 req_ctx->data_len = bs;
1973 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1974 if (digestsize == SHA224_DIGEST_SIZE)
1975 memcpy(req_ctx->partial_hash, hmacctx->ipad,
1976 SHA256_DIGEST_SIZE);
1977 else if (digestsize == SHA384_DIGEST_SIZE)
1978 memcpy(req_ctx->partial_hash, hmacctx->ipad,
1979 SHA512_DIGEST_SIZE);
1980 else
1981 memcpy(req_ctx->partial_hash, hmacctx->ipad,
1982 digestsize);
1983 }
1984 return 0;
1985}
1986
1987static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
1988{
1989 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1990 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1991 unsigned int digestsize =
1992 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
1993
1994 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1995 sizeof(struct chcr_ahash_req_ctx));
e7922729
HJ
1996 hmacctx->base_hash = chcr_alloc_shash(digestsize);
1997 if (IS_ERR(hmacctx->base_hash))
1998 return PTR_ERR(hmacctx->base_hash);
324429d7
HS
1999 return chcr_device_init(crypto_tfm_ctx(tfm));
2000}
2001
324429d7
HS
2002static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2003{
2004 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2005 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2006
e7922729
HJ
2007 if (hmacctx->base_hash) {
2008 chcr_free_shash(hmacctx->base_hash);
2009 hmacctx->base_hash = NULL;
324429d7
HS
2010 }
2011}
2012
2f47d580
HJ
2013static int chcr_aead_common_init(struct aead_request *req,
2014 unsigned short op_type)
2debd332 2015{
2f47d580
HJ
2016 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2017 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2018 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2019 int error = -EINVAL;
2020 unsigned int dst_size;
2021 unsigned int authsize = crypto_aead_authsize(tfm);
2debd332 2022
2f47d580
HJ
2023 dst_size = req->assoclen + req->cryptlen + (op_type ?
2024 -authsize : authsize);
2025 /* validate key size */
2026 if (aeadctx->enckey_len == 0)
2027 goto err;
2028 if (op_type && req->cryptlen < authsize)
2029 goto err;
2030 error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2031 op_type);
2032 if (error) {
2033 error = -ENOMEM;
2034 goto err;
2035 }
2036 reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
2037 CHCR_SRC_SG_SIZE, 0);
2038 reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
2039 CHCR_SRC_SG_SIZE, req->assoclen);
2040 return 0;
2041err:
2042 return error;
2debd332 2043}
2f47d580
HJ
2044
2045static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
0e93708d
HJ
2046 int aadmax, int wrlen,
2047 unsigned short op_type)
2048{
2049 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2050
2051 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2f47d580 2052 dst_nents > MAX_DSGL_ENT ||
0e93708d 2053 (req->assoclen > aadmax) ||
2f47d580 2054 (wrlen > SGE_MAX_WR_LEN))
0e93708d
HJ
2055 return 1;
2056 return 0;
2057}
2debd332 2058
0e93708d
HJ
2059static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2060{
2061 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2f47d580 2062 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
0e93708d
HJ
2063 struct aead_request *subreq = aead_request_ctx(req);
2064
2065 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2066 aead_request_set_callback(subreq, req->base.flags,
2067 req->base.complete, req->base.data);
2068 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2069 req->iv);
2070 aead_request_set_ad(subreq, req->assoclen);
2071 return op_type ? crypto_aead_decrypt(subreq) :
2072 crypto_aead_encrypt(subreq);
2073}
2debd332
HJ
2074
2075static struct sk_buff *create_authenc_wr(struct aead_request *req,
2076 unsigned short qid,
2077 int size,
2078 unsigned short op_type)
2079{
2080 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2f47d580 2081 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
2082 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2083 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2084 struct sk_buff *skb = NULL;
2085 struct chcr_wr *chcr_req;
2086 struct cpl_rx_phys_dsgl *phys_cpl;
2f47d580
HJ
2087 struct ulptx_sgl *ulptx;
2088 unsigned int transhdr_len;
2089 unsigned int dst_size = 0, temp;
2090 unsigned int kctx_len = 0, dnents;
2debd332
HJ
2091 unsigned int assoclen = req->assoclen;
2092 unsigned int authsize = crypto_aead_authsize(tfm);
2f47d580 2093 int error = -EINVAL;
2debd332
HJ
2094 int null = 0;
2095 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2096 GFP_ATOMIC;
2f47d580 2097 struct adapter *adap = padap(a_ctx(tfm)->dev);
2debd332 2098
2f47d580
HJ
2099 if (req->cryptlen == 0)
2100 return NULL;
2debd332 2101
2f47d580 2102 reqctx->b0_dma = 0;
2debd332
HJ
2103 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
2104 null = 1;
2105 assoclen = 0;
2106 }
2f47d580
HJ
2107 dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
2108 authsize);
2109 error = chcr_aead_common_init(req, op_type);
2110 if (error)
2111 return ERR_PTR(error);
2112 if (dst_size) {
2113 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2114 dnents += sg_nents_xlen(req->dst, req->cryptlen +
2115 (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
2116 req->assoclen);
2117 dnents += MIN_AUTH_SG; // For IV
2118 } else {
2119 dnents = 0;
2debd332 2120 }
2f47d580
HJ
2121
2122 dst_size = get_space_for_phys_dsgl(dnents);
2debd332
HJ
2123 kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2124 - sizeof(chcr_req->key_ctx);
2125 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2f47d580
HJ
2126 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
2127 SGE_MAX_WR_LEN;
2128 temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen), 16)
2129 * 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents
2130 + MIN_GCM_SG) * 8);
2131 transhdr_len += temp;
2132 transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
2133
2134 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2135 transhdr_len, op_type)) {
ee0863ba 2136 atomic_inc(&adap->chcr_stats.fallback);
2f47d580
HJ
2137 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2138 op_type);
0e93708d
HJ
2139 return ERR_PTR(chcr_aead_fallback(req, op_type));
2140 }
2f47d580 2141 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
5fe8c711
HJ
2142 if (!skb) {
2143 error = -ENOMEM;
2debd332 2144 goto err;
5fe8c711 2145 }
2debd332 2146
de77b966 2147 chcr_req = __skb_put_zero(skb, transhdr_len);
2debd332 2148
2f47d580 2149 temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2debd332
HJ
2150
2151 /*
 2152 * Input order is AAD, IV and payload, where the IV should be included
 2153 * as part of the authdata. All other fields should be filled according
 2154 * to the hardware spec.
2155 */
2156 chcr_req->sec_cpl.op_ivinsrtofst =
2f47d580
HJ
2157 FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
2158 assoclen + 1);
2159 chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
2debd332
HJ
2160 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2161 assoclen ? 1 : 0, assoclen,
2f47d580
HJ
2162 assoclen + IV + 1,
2163 (temp & 0x1F0) >> 4);
2debd332 2164 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2f47d580
HJ
2165 temp & 0xF,
2166 null ? 0 : assoclen + IV + 1,
2167 temp, temp);
2debd332
HJ
2168 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2169 (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
2170 CHCR_SCMD_CIPHER_MODE_AES_CBC,
2171 actx->auth_mode, aeadctx->hmac_ctrl,
2f47d580 2172 IV >> 1);
2debd332 2173 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2f47d580 2174 0, 0, dst_size);
2debd332
HJ
2175
2176 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2177 if (op_type == CHCR_ENCRYPT_OP)
2178 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2179 aeadctx->enckey_len);
2180 else
2181 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2182 aeadctx->enckey_len);
2183
2184 memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
2185 4), actx->h_iopad, kctx_len -
2186 (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
2f47d580 2187 memcpy(reqctx->iv, req->iv, IV);
2debd332 2188 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2f47d580
HJ
2189 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2190 chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2191 chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
ee0863ba 2192 atomic_inc(&adap->chcr_stats.cipher_rqst);
2f47d580
HJ
2193 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2194 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
2195 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2196 transhdr_len, temp, 0);
2debd332 2197 reqctx->skb = skb;
2f47d580 2198 reqctx->op = op_type;
2debd332
HJ
2199
2200 return skb;
2debd332 2201err:
2f47d580
HJ
2202 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2203 op_type);
2204
5fe8c711 2205 return ERR_PTR(error);
2debd332
HJ
2206}
2207
2f47d580
HJ
2208static int chcr_aead_dma_map(struct device *dev,
2209 struct aead_request *req,
2210 unsigned short op_type)
2211{
2212 int error;
2213 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2214 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2215 unsigned int authsize = crypto_aead_authsize(tfm);
2216 int dst_size;
2217
2218 dst_size = req->assoclen + req->cryptlen + (op_type ?
2219 -authsize : authsize);
2220 if (!req->cryptlen || !dst_size)
2221 return 0;
2222 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
2223 DMA_BIDIRECTIONAL);
2224 if (dma_mapping_error(dev, reqctx->iv_dma))
2225 return -ENOMEM;
2226
2227 if (req->src == req->dst) {
2228 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2229 DMA_BIDIRECTIONAL);
2230 if (!error)
2231 goto err;
2232 } else {
2233 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2234 DMA_TO_DEVICE);
2235 if (!error)
2236 goto err;
2237 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2238 DMA_FROM_DEVICE);
2239 if (!error) {
2240 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2241 DMA_TO_DEVICE);
2242 goto err;
2243 }
2244 }
2245
2246 return 0;
2247err:
2248 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2249 return -ENOMEM;
2250}
2251
2252static void chcr_aead_dma_unmap(struct device *dev,
2253 struct aead_request *req,
2254 unsigned short op_type)
2255{
2256 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2257 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2258 unsigned int authsize = crypto_aead_authsize(tfm);
2259 int dst_size;
2260
2261 dst_size = req->assoclen + req->cryptlen + (op_type ?
2262 -authsize : authsize);
2263 if (!req->cryptlen || !dst_size)
2264 return;
2265
2266 dma_unmap_single(dev, reqctx->iv_dma, IV,
2267 DMA_BIDIRECTIONAL);
2268 if (req->src == req->dst) {
2269 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2270 DMA_BIDIRECTIONAL);
2271 } else {
2272 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2273 DMA_TO_DEVICE);
2274 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2275 DMA_FROM_DEVICE);
2276 }
2277}
2278
2279static inline void chcr_add_aead_src_ent(struct aead_request *req,
2280 struct ulptx_sgl *ulptx,
2281 unsigned int assoclen,
2282 unsigned short op_type)
2283{
2284 struct ulptx_walk ulp_walk;
2285 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2286
2287 if (reqctx->imm) {
2288 u8 *buf = (u8 *)ulptx;
2289
2290 if (reqctx->b0_dma) {
2291 memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2292 buf += reqctx->b0_len;
2293 }
2294 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2295 buf, assoclen, 0);
2296 buf += assoclen;
2297 memcpy(buf, reqctx->iv, IV);
2298 buf += IV;
2299 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2300 buf, req->cryptlen, req->assoclen);
2301 } else {
2302 ulptx_walk_init(&ulp_walk, ulptx);
2303 if (reqctx->b0_dma)
2304 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2305 &reqctx->b0_dma);
2306 ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
2307 ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2308 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
2309 req->assoclen);
2310 ulptx_walk_end(&ulp_walk);
2311 }
2312}
2313
2314static inline void chcr_add_aead_dst_ent(struct aead_request *req,
2315 struct cpl_rx_phys_dsgl *phys_cpl,
2316 unsigned int assoclen,
2317 unsigned short op_type,
2318 unsigned short qid)
2319{
2320 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2321 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2322 struct dsgl_walk dsgl_walk;
2323 unsigned int authsize = crypto_aead_authsize(tfm);
2324 u32 temp;
2325
2326 dsgl_walk_init(&dsgl_walk, phys_cpl);
2327 if (reqctx->b0_dma)
2328 dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
2329 dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
2330 dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2331 temp = req->cryptlen + (op_type ? -authsize : authsize);
2332 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
2333 dsgl_walk_end(&dsgl_walk, qid);
2334}
2335
2336static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2337 struct ulptx_sgl *ulptx,
2338 struct cipher_wr_param *wrparam)
2339{
2340 struct ulptx_walk ulp_walk;
2341 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2342
2343 if (reqctx->imm) {
2344 u8 *buf = (u8 *)ulptx;
2345
2346 memcpy(buf, reqctx->iv, IV);
2347 buf += IV;
2348 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2349 buf, wrparam->bytes, reqctx->processed);
2350 } else {
2351 ulptx_walk_init(&ulp_walk, ulptx);
2352 ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2353 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2354 reqctx->src_ofst);
2355 reqctx->srcsg = ulp_walk.last_sg;
2356 reqctx->src_ofst = ulp_walk.last_sg_len;
2357 ulptx_walk_end(&ulp_walk);
2358 }
2359}
2360
2361static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2362 struct cpl_rx_phys_dsgl *phys_cpl,
2363 struct cipher_wr_param *wrparam,
2364 unsigned short qid)
2365{
2366 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2367 struct dsgl_walk dsgl_walk;
2368
2369 dsgl_walk_init(&dsgl_walk, phys_cpl);
2370 dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2371 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2372 reqctx->dst_ofst);
2373 reqctx->dstsg = dsgl_walk.last_sg;
2374 reqctx->dst_ofst = dsgl_walk.last_sg_len;
2375
2376 dsgl_walk_end(&dsgl_walk, qid);
2377}
2378
2379static inline void chcr_add_hash_src_ent(struct ahash_request *req,
2380 struct ulptx_sgl *ulptx,
2381 struct hash_wr_param *param)
2382{
2383 struct ulptx_walk ulp_walk;
2384 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2385
2386 if (reqctx->imm) {
2387 u8 *buf = (u8 *)ulptx;
2388
2389 if (param->bfr_len) {
2390 memcpy(buf, reqctx->reqbfr, param->bfr_len);
2391 buf += param->bfr_len;
2392 }
2393 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2394 buf, param->sg_len, 0);
2395 } else {
2396 ulptx_walk_init(&ulp_walk, ulptx);
2397 if (param->bfr_len)
2398 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2399 &reqctx->dma_addr);
2400 ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
2401 0);
2402// reqctx->srcsg = ulp_walk.last_sg;
2403// reqctx->src_ofst = ulp_walk.last_sg_len;
2404 ulptx_walk_end(&ulp_walk);
2405 }
2406}
2407
2408
2409static inline int chcr_hash_dma_map(struct device *dev,
2410 struct ahash_request *req)
2411{
2412 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2413 int error = 0;
2414
2415 if (!req->nbytes)
2416 return 0;
2417 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2418 DMA_TO_DEVICE);
2419 if (!error)
 2420 return -ENOMEM;
2421 req_ctx->is_sg_map = 1;
2422 return 0;
2423}
2424
2425static inline void chcr_hash_dma_unmap(struct device *dev,
2426 struct ahash_request *req)
2427{
2428 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2429
2430 if (!req->nbytes)
2431 return;
2432
2433 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2434 DMA_TO_DEVICE);
2435 req_ctx->is_sg_map = 0;
2436
2437}
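The zero check in chcr_hash_dma_map() relies on the dma_map_sg() contract: it returns the number of segments actually mapped and 0 on failure, so a zero return is turned into -ENOMEM instead of being propagated as success. A minimal sketch of the same pattern (variable names illustrative):

	int nents = dma_map_sg(dev, sgl, sg_nents(sgl), DMA_TO_DEVICE);

	if (!nents)
		return -ENOMEM;		/* nothing was mapped */
	/* ... use the list, then dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_TO_DEVICE); */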
2438
2439
2440static int chcr_cipher_dma_map(struct device *dev,
2441 struct ablkcipher_request *req)
2442{
2443 int error;
2444 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2445
2446 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
2447 DMA_BIDIRECTIONAL);
2448 if (dma_mapping_error(dev, reqctx->iv_dma))
2449 return -ENOMEM;
2450
2451 if (req->src == req->dst) {
2452 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2453 DMA_BIDIRECTIONAL);
2454 if (!error)
2455 goto err;
2456 } else {
2457 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2458 DMA_TO_DEVICE);
2459 if (!error)
2460 goto err;
2461 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2462 DMA_FROM_DEVICE);
2463 if (!error) {
2464 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2465 DMA_TO_DEVICE);
2466 goto err;
2467 }
2468 }
2469
2470 return 0;
2471err:
2472 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2473 return -ENOMEM;
2474}
2475static void chcr_cipher_dma_unmap(struct device *dev,
2476 struct ablkcipher_request *req)
2477{
2478 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2479
2480 dma_unmap_single(dev, reqctx->iv_dma, IV,
2481 DMA_BIDIRECTIONAL);
2482 if (req->src == req->dst) {
2483 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2484 DMA_BIDIRECTIONAL);
2485 } else {
2486 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2487 DMA_TO_DEVICE);
2488 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2489 DMA_FROM_DEVICE);
2490 }
2491}
2492
2debd332
HJ
2493static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2494{
2495 __be32 data;
2496
2497 memset(block, 0, csize);
2498 block += csize;
2499
2500 if (csize >= 4)
2501 csize = 4;
2502 else if (msglen > (unsigned int)(1 << (8 * csize)))
2503 return -EOVERFLOW;
2504
2505 data = cpu_to_be32(msglen);
2506 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2507
2508 return 0;
2509}
2510
2511static void generate_b0(struct aead_request *req,
2512 struct chcr_aead_ctx *aeadctx,
2513 unsigned short op_type)
2514{
2515 unsigned int l, lp, m;
2516 int rc;
2517 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2518 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2519 u8 *b0 = reqctx->scratch_pad;
2520
2521 m = crypto_aead_authsize(aead);
2522
2523 memcpy(b0, reqctx->iv, 16);
2524
2525 lp = b0[0];
2526 l = lp + 1;
2527
2528 /* set m, bits 3-5 */
2529 *b0 |= (8 * ((m - 2) / 2));
2530
2531 /* set adata, bit 6, if associated data is used */
2532 if (req->assoclen)
2533 *b0 |= 64;
2534 rc = set_msg_len(b0 + 16 - l,
2535 (op_type == CHCR_DECRYPT_OP) ?
2536 req->cryptlen - m : req->cryptlen, l);
2537}
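generate_b0() fills in the RFC 3610 B0 block: iv[0] already carries L' = L - 1, the flags byte additionally encodes the tag length and whether AAD is present, and set_msg_len() writes the message length big-endian into the last L bytes. A standalone sketch of the flags-byte construction (names are illustrative, not driver code):

#include <stdint.h>

static uint8_t ccm_b0_flags(unsigned int taglen, unsigned int L, int have_aad)
{
	return (have_aad ? 0x40 : 0) |		/* Adata bit */
	       (((taglen - 2) / 2) << 3) |	/* M' field, bits 3..5 */
	       (L - 1);				/* L' field, bits 0..2 */
}
/* e.g. taglen = 16, L = 4, AAD present: 0x40 | 0x38 | 0x03 = 0x7b */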
2538
2539static inline int crypto_ccm_check_iv(const u8 *iv)
2540{
2541 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2542 if (iv[0] < 1 || iv[0] > 7)
2543 return -EINVAL;
2544
2545 return 0;
2546}
2547
2548static int ccm_format_packet(struct aead_request *req,
2549 struct chcr_aead_ctx *aeadctx,
2550 unsigned int sub_type,
2551 unsigned short op_type)
2552{
2553 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2554 int rc = 0;
2555
2debd332
HJ
2556 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2557 reqctx->iv[0] = 3;
2558 memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
2559 memcpy(reqctx->iv + 4, req->iv, 8);
2560 memset(reqctx->iv + 12, 0, 4);
2561 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2562 htons(req->assoclen - 8);
2563 } else {
2564 memcpy(reqctx->iv, req->iv, 16);
2565 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2566 htons(req->assoclen);
2567 }
2568 generate_b0(req, aeadctx, op_type);
2569 /* zero the ctr value */
2570 memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
2571 return rc;
2572}
2573
2574static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2575 unsigned int dst_size,
2576 struct aead_request *req,
2f47d580 2577 unsigned short op_type)
2debd332
HJ
2578{
2579 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2f47d580 2580 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
2581 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2582 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2f47d580 2583 unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
2debd332
HJ
2584 unsigned int ccm_xtra;
2585 unsigned char tag_offset = 0, auth_offset = 0;
2debd332
HJ
2586 unsigned int assoclen;
2587
2588 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2589 assoclen = req->assoclen - 8;
2590 else
2591 assoclen = req->assoclen;
2592 ccm_xtra = CCM_B0_SIZE +
2593 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2594
2595 auth_offset = req->cryptlen ?
2f47d580 2596 (assoclen + IV + 1 + ccm_xtra) : 0;
2debd332
HJ
2597 if (op_type == CHCR_DECRYPT_OP) {
2598 if (crypto_aead_authsize(tfm) != req->cryptlen)
2599 tag_offset = crypto_aead_authsize(tfm);
2600 else
2601 auth_offset = 0;
2602 }
2603
2604
2605 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2f47d580 2606 2, assoclen + 1 + ccm_xtra);
2debd332 2607 sec_cpl->pldlen =
2f47d580 2608 htonl(assoclen + IV + req->cryptlen + ccm_xtra);
2debd332
HJ
 2609 /* For CCM there will always be a B0 block, so AAD start is always 1 */
2610 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2611 1, assoclen + ccm_xtra, assoclen
2f47d580 2612 + IV + 1 + ccm_xtra, 0);
2debd332
HJ
2613
2614 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2615 auth_offset, tag_offset,
2616 (op_type == CHCR_ENCRYPT_OP) ? 0 :
2617 crypto_aead_authsize(tfm));
2618 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2619 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
0a7bd30c 2620 cipher_mode, mac_mode,
2f47d580 2621 aeadctx->hmac_ctrl, IV >> 1);
2debd332
HJ
2622
2623 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2f47d580 2624 0, dst_size);
2debd332
HJ
2625}
2626
2627int aead_ccm_validate_input(unsigned short op_type,
2628 struct aead_request *req,
2629 struct chcr_aead_ctx *aeadctx,
2630 unsigned int sub_type)
2631{
2632 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2633 if (crypto_ccm_check_iv(req->iv)) {
2634 pr_err("CCM: IV check fails\n");
2635 return -EINVAL;
2636 }
2637 } else {
2638 if (req->assoclen != 16 && req->assoclen != 20) {
2639 pr_err("RFC4309: Invalid AAD length %d\n",
2640 req->assoclen);
2641 return -EINVAL;
2642 }
2643 }
2debd332
HJ
2644 return 0;
2645}
2646
2debd332
HJ
2647static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2648 unsigned short qid,
2649 int size,
2650 unsigned short op_type)
2651{
2652 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2f47d580 2653 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
2654 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2655 struct sk_buff *skb = NULL;
2656 struct chcr_wr *chcr_req;
2657 struct cpl_rx_phys_dsgl *phys_cpl;
2f47d580
HJ
2658 struct ulptx_sgl *ulptx;
2659 unsigned int transhdr_len;
2660 unsigned int dst_size = 0, kctx_len, dnents, temp;
2661 unsigned int sub_type, assoclen = req->assoclen;
2debd332 2662 unsigned int authsize = crypto_aead_authsize(tfm);
2f47d580 2663 int error = -EINVAL;
2debd332
HJ
2664 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2665 GFP_ATOMIC;
2f47d580 2666 struct adapter *adap = padap(a_ctx(tfm)->dev);
2debd332 2667
2f47d580
HJ
2668 reqctx->b0_dma = 0;
2669 sub_type = get_aead_subtype(tfm);
2670 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2671 assoclen -= 8;
2672 dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
738bff48 2673 authsize);
2f47d580
HJ
2674 error = chcr_aead_common_init(req, op_type);
2675 if (error)
2676 return ERR_PTR(error);
0e93708d 2677
2f47d580
HJ
2678
2679 reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
5fe8c711
HJ
2680 error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
2681 if (error)
2debd332 2682 goto err;
2f47d580
HJ
2683 if (dst_size) {
2684 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2685 dnents += sg_nents_xlen(req->dst, req->cryptlen
2686 + (op_type ? -authsize : authsize),
2687 CHCR_DST_SG_SIZE, req->assoclen);
2688 dnents += MIN_CCM_SG; // For IV and B0
2689 } else {
2690 dnents = 0;
2691 }
2692 dst_size = get_space_for_phys_dsgl(dnents);
2debd332
HJ
2693 kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
2694 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2f47d580
HJ
2695 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
2696 reqctx->b0_len) <= SGE_MAX_WR_LEN;
2697 temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen +
2698 reqctx->b0_len), 16) * 16) :
2699 (sgl_len(reqctx->src_nents + reqctx->aad_nents +
2700 MIN_CCM_SG) * 8);
2701 transhdr_len += temp;
2702 transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
2703
2704 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2705 reqctx->b0_len, transhdr_len, op_type)) {
ee0863ba 2706 atomic_inc(&adap->chcr_stats.fallback);
2f47d580
HJ
2707 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2708 op_type);
0e93708d
HJ
2709 return ERR_PTR(chcr_aead_fallback(req, op_type));
2710 }
2f47d580 2711 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2debd332 2712
5fe8c711
HJ
2713 if (!skb) {
2714 error = -ENOMEM;
2debd332 2715 goto err;
5fe8c711 2716 }
2debd332 2717
2f47d580 2718 chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
2debd332 2719
2f47d580 2720 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);
2debd332
HJ
2721
2722 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2723 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2724 memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
2725 16), aeadctx->key, aeadctx->enckey_len);
2726
2727 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2f47d580 2728 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
5fe8c711
HJ
2729 error = ccm_format_packet(req, aeadctx, sub_type, op_type);
2730 if (error)
2debd332
HJ
2731 goto dstmap_fail;
2732
2f47d580
HJ
2733 reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
2734 &reqctx->scratch_pad, reqctx->b0_len,
2735 DMA_BIDIRECTIONAL);
2736 if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
2737 reqctx->b0_dma)) {
2738 error = -ENOMEM;
2debd332 2739 goto dstmap_fail;
2f47d580
HJ
2740 }
2741
2742 chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2743 chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
2debd332 2744
ee0863ba 2745 atomic_inc(&adap->chcr_stats.aead_rqst);
2f47d580
HJ
2746 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2747 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
2748 reqctx->b0_len) : 0);
2749 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2750 transhdr_len, temp, 0);
2debd332 2751 reqctx->skb = skb;
2f47d580
HJ
2752 reqctx->op = op_type;
2753
2debd332
HJ
2754 return skb;
2755dstmap_fail:
2756 kfree_skb(skb);
2debd332 2757err:
2f47d580 2758 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
5fe8c711 2759 return ERR_PTR(error);
2debd332
HJ
2760}
2761
2762static struct sk_buff *create_gcm_wr(struct aead_request *req,
2763 unsigned short qid,
2764 int size,
2765 unsigned short op_type)
2766{
2767 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2f47d580 2768 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
2769 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2770 struct sk_buff *skb = NULL;
2771 struct chcr_wr *chcr_req;
2772 struct cpl_rx_phys_dsgl *phys_cpl;
2f47d580
HJ
2773 struct ulptx_sgl *ulptx;
2774 unsigned int transhdr_len, dnents = 0;
2775 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2debd332 2776 unsigned int authsize = crypto_aead_authsize(tfm);
2f47d580 2777 int error = -EINVAL;
2debd332
HJ
2778 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2779 GFP_ATOMIC;
2f47d580 2780 struct adapter *adap = padap(a_ctx(tfm)->dev);
2debd332 2781
2f47d580
HJ
2782 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
2783 assoclen = req->assoclen - 8;
2debd332 2784
2f47d580
HJ
2785 reqctx->b0_dma = 0;
2786 dst_size = assoclen + req->cryptlen + (op_type ? -authsize : authsize);
2787 error = chcr_aead_common_init(req, op_type);
5fe8c711
HJ
2788 if (error)
2789 return ERR_PTR(error);
2f47d580
HJ
2790 if (dst_size) {
2791 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2792 dnents += sg_nents_xlen(req->dst,
2793 req->cryptlen + (op_type ? -authsize : authsize),
2794 CHCR_DST_SG_SIZE, req->assoclen);
2795 dnents += MIN_GCM_SG; // For IV
2796 } else {
2797 dnents = 0;
2debd332 2798 }
2f47d580 2799 dst_size = get_space_for_phys_dsgl(dnents);
2debd332
HJ
2800 kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
2801 AEAD_H_SIZE;
2802 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2f47d580
HJ
2803 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
2804 SGE_MAX_WR_LEN;
2805 temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV +
2806 req->cryptlen), 16) * 16) : (sgl_len(reqctx->src_nents +
2807 reqctx->aad_nents + MIN_GCM_SG) * 8);
2808 transhdr_len += temp;
2809 transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
2810 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2811 transhdr_len, op_type)) {
ee0863ba 2812 atomic_inc(&adap->chcr_stats.fallback);
2f47d580
HJ
2813 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2814 op_type);
0e93708d
HJ
2815 return ERR_PTR(chcr_aead_fallback(req, op_type));
2816 }
2f47d580 2817 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
5fe8c711
HJ
2818 if (!skb) {
2819 error = -ENOMEM;
2debd332 2820 goto err;
5fe8c711 2821 }
2debd332 2822
de77b966 2823 chcr_req = __skb_put_zero(skb, transhdr_len);
2debd332 2824
2f47d580
HJ
2825 //Offset of tag from end
2826 temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2debd332 2827 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
2f47d580
HJ
2828 a_ctx(tfm)->dev->rx_channel_id, 2,
2829 (assoclen + 1));
0e93708d 2830 chcr_req->sec_cpl.pldlen =
2f47d580 2831 htonl(assoclen + IV + req->cryptlen);
2debd332 2832 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
d600fc8a 2833 assoclen ? 1 : 0, assoclen,
2f47d580 2834 assoclen + IV + 1, 0);
2debd332 2835 chcr_req->sec_cpl.cipherstop_lo_authinsert =
2f47d580
HJ
2836 FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
2837 temp, temp);
2debd332
HJ
2838 chcr_req->sec_cpl.seqno_numivs =
2839 FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
2840 CHCR_ENCRYPT_OP) ? 1 : 0,
2841 CHCR_SCMD_CIPHER_MODE_AES_GCM,
0a7bd30c 2842 CHCR_SCMD_AUTH_MODE_GHASH,
2f47d580 2843 aeadctx->hmac_ctrl, IV >> 1);
2debd332 2844 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2f47d580 2845 0, 0, dst_size);
2debd332
HJ
2846 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2847 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2848 memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
2849 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
2850
2851 /* prepare a 16 byte iv */
2852 /* S A L T | IV | 0x00000001 */
2853 if (get_aead_subtype(tfm) ==
2854 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
2855 memcpy(reqctx->iv, aeadctx->salt, 4);
8f6acb7f 2856 memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
2debd332 2857 } else {
8f6acb7f 2858 memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
2debd332
HJ
2859 }
2860 *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
2861
2862 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2f47d580 2863 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2debd332 2864
2f47d580
HJ
2865 chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2866 chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
ee0863ba 2867 atomic_inc(&adap->chcr_stats.aead_rqst);
2f47d580
HJ
2868 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2869 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
2870 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2871 transhdr_len, temp, reqctx->verify);
2debd332 2872 reqctx->skb = skb;
2f47d580 2873 reqctx->op = op_type;
2debd332
HJ
2874 return skb;
2875
2debd332 2876err:
2f47d580 2877 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
5fe8c711 2878 return ERR_PTR(error);
2debd332
HJ
2879}
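The 16-byte IV assembled in create_gcm_wr() is the GCM pre-counter block J0 for a 96-bit nonce (NIST SP 800-38D): the 12 nonce bytes followed by a 32-bit counter of 1; for rfc4106 the nonce is the 4-byte salt saved at setkey time plus the 8-byte per-request IV. A standalone sketch of that layout (not driver code):

#include <stdint.h>
#include <string.h>

static void gcm_build_j0(uint8_t j0[16], const uint8_t salt[4],
			 const uint8_t *iv, int rfc4106)
{
	if (rfc4106) {
		memcpy(j0, salt, 4);	/* salt from the tail of the key */
		memcpy(j0 + 4, iv, 8);	/* 8-byte per-request IV */
	} else {
		memcpy(j0, iv, 12);	/* plain GCM: 96-bit IV */
	}
	j0[12] = 0;
	j0[13] = 0;
	j0[14] = 0;
	j0[15] = 1;			/* initial counter value */
}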
2880
2881
2882
2883static int chcr_aead_cra_init(struct crypto_aead *tfm)
2884{
2f47d580 2885 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
0e93708d
HJ
2886 struct aead_alg *alg = crypto_aead_alg(tfm);
2887
2888 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
5fe8c711
HJ
2889 CRYPTO_ALG_NEED_FALLBACK |
2890 CRYPTO_ALG_ASYNC);
0e93708d
HJ
2891 if (IS_ERR(aeadctx->sw_cipher))
2892 return PTR_ERR(aeadctx->sw_cipher);
2893 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
2894 sizeof(struct aead_request) +
2895 crypto_aead_reqsize(aeadctx->sw_cipher)));
2f47d580 2896 return chcr_device_init(a_ctx(tfm));
2debd332
HJ
2897}
2898
2899static void chcr_aead_cra_exit(struct crypto_aead *tfm)
2900{
2f47d580 2901 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
0e93708d 2902
0e93708d 2903 crypto_free_aead(aeadctx->sw_cipher);
2debd332
HJ
2904}
2905
2906static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
2907 unsigned int authsize)
2908{
2f47d580 2909 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
2910
2911 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
2912 aeadctx->mayverify = VERIFY_HW;
0e93708d 2913 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
2914}
2915static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
2916 unsigned int authsize)
2917{
2f47d580 2918 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
2919 u32 maxauth = crypto_aead_maxauthsize(tfm);
2920
 2921 /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2 does
 2922 * not hold for SHA1; the authsize == 12 check must therefore come before
 2923 * the authsize == (maxauth >> 1) check.
2924 */
2925 if (authsize == ICV_4) {
2926 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2927 aeadctx->mayverify = VERIFY_HW;
2928 } else if (authsize == ICV_6) {
2929 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2930 aeadctx->mayverify = VERIFY_HW;
2931 } else if (authsize == ICV_10) {
2932 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2933 aeadctx->mayverify = VERIFY_HW;
2934 } else if (authsize == ICV_12) {
2935 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2936 aeadctx->mayverify = VERIFY_HW;
2937 } else if (authsize == ICV_14) {
2938 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2939 aeadctx->mayverify = VERIFY_HW;
2940 } else if (authsize == (maxauth >> 1)) {
2941 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2942 aeadctx->mayverify = VERIFY_HW;
2943 } else if (authsize == maxauth) {
2944 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2945 aeadctx->mayverify = VERIFY_HW;
2946 } else {
2947 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2948 aeadctx->mayverify = VERIFY_SW;
2949 }
0e93708d 2950 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
2951}
2952
2953
2954static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
2955{
2f47d580 2956 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
2957
2958 switch (authsize) {
2959 case ICV_4:
2960 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2961 aeadctx->mayverify = VERIFY_HW;
2962 break;
2963 case ICV_8:
2964 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2965 aeadctx->mayverify = VERIFY_HW;
2966 break;
2967 case ICV_12:
2968 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2969 aeadctx->mayverify = VERIFY_HW;
2970 break;
2971 case ICV_14:
2972 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2973 aeadctx->mayverify = VERIFY_HW;
2974 break;
2975 case ICV_16:
2976 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2977 aeadctx->mayverify = VERIFY_HW;
2978 break;
2979 case ICV_13:
2980 case ICV_15:
2981 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2982 aeadctx->mayverify = VERIFY_SW;
2983 break;
2984 default:
2985
2986 crypto_tfm_set_flags((struct crypto_tfm *) tfm,
2987 CRYPTO_TFM_RES_BAD_KEY_LEN);
2988 return -EINVAL;
2989 }
0e93708d 2990 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
2991}
2992
2993static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
2994 unsigned int authsize)
2995{
2f47d580 2996 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
2997
2998 switch (authsize) {
2999 case ICV_8:
3000 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3001 aeadctx->mayverify = VERIFY_HW;
3002 break;
3003 case ICV_12:
3004 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3005 aeadctx->mayverify = VERIFY_HW;
3006 break;
3007 case ICV_16:
3008 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3009 aeadctx->mayverify = VERIFY_HW;
3010 break;
3011 default:
3012 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3013 CRYPTO_TFM_RES_BAD_KEY_LEN);
3014 return -EINVAL;
3015 }
0e93708d 3016 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
3017}
3018
3019static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3020 unsigned int authsize)
3021{
2f47d580 3022 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2debd332
HJ
3023
3024 switch (authsize) {
3025 case ICV_4:
3026 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3027 aeadctx->mayverify = VERIFY_HW;
3028 break;
3029 case ICV_6:
3030 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3031 aeadctx->mayverify = VERIFY_HW;
3032 break;
3033 case ICV_8:
3034 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3035 aeadctx->mayverify = VERIFY_HW;
3036 break;
3037 case ICV_10:
3038 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3039 aeadctx->mayverify = VERIFY_HW;
3040 break;
3041 case ICV_12:
3042 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3043 aeadctx->mayverify = VERIFY_HW;
3044 break;
3045 case ICV_14:
3046 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3047 aeadctx->mayverify = VERIFY_HW;
3048 break;
3049 case ICV_16:
3050 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3051 aeadctx->mayverify = VERIFY_HW;
3052 break;
3053 default:
3054 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3055 CRYPTO_TFM_RES_BAD_KEY_LEN);
3056 return -EINVAL;
3057 }
0e93708d 3058 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
3059}
3060
0e93708d 3061static int chcr_ccm_common_setkey(struct crypto_aead *aead,
2debd332
HJ
3062 const u8 *key,
3063 unsigned int keylen)
3064{
2f47d580 3065 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
2debd332
HJ
3066 unsigned char ck_size, mk_size;
3067 int key_ctx_size = 0;
3068
2debd332
HJ
3069 key_ctx_size = sizeof(struct _key_ctx) +
3070 ((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
3071 if (keylen == AES_KEYSIZE_128) {
3072 mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3073 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3074 } else if (keylen == AES_KEYSIZE_192) {
3075 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3076 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3077 } else if (keylen == AES_KEYSIZE_256) {
3078 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3079 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3080 } else {
3081 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3082 CRYPTO_TFM_RES_BAD_KEY_LEN);
3083 aeadctx->enckey_len = 0;
3084 return -EINVAL;
3085 }
3086 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3087 key_ctx_size >> 4);
0e93708d
HJ
3088 memcpy(aeadctx->key, key, keylen);
3089 aeadctx->enckey_len = keylen;
3090
2debd332
HJ
3091 return 0;
3092}
3093
0e93708d
HJ
3094static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3095 const u8 *key,
3096 unsigned int keylen)
3097{
2f47d580 3098 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
0e93708d
HJ
3099 int error;
3100
3101 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3102 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3103 CRYPTO_TFM_REQ_MASK);
3104 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3105 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3106 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3107 CRYPTO_TFM_RES_MASK);
3108 if (error)
3109 return error;
3110 return chcr_ccm_common_setkey(aead, key, keylen);
3111}
3112
2debd332
HJ
3113static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3114 unsigned int keylen)
3115{
2f47d580 3116 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
4dbeae42 3117 int error;
2debd332
HJ
3118
3119 if (keylen < 3) {
3120 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3121 CRYPTO_TFM_RES_BAD_KEY_LEN);
3122 aeadctx->enckey_len = 0;
3123 return -EINVAL;
3124 }
4dbeae42
HJ
3125 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3126 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3127 CRYPTO_TFM_REQ_MASK);
3128 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3129 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3130 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3131 CRYPTO_TFM_RES_MASK);
3132 if (error)
3133 return error;
2debd332
HJ
3134 keylen -= 3;
3135 memcpy(aeadctx->salt, key + keylen, 3);
0e93708d 3136 return chcr_ccm_common_setkey(aead, key, keylen);
2debd332
HJ
3137}
3138
3139static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3140 unsigned int keylen)
3141{
2f47d580 3142 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
2debd332 3143 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
8356ea51 3144 struct crypto_cipher *cipher;
2debd332
HJ
3145 unsigned int ck_size;
3146 int ret = 0, key_ctx_size = 0;
3147
0e93708d
HJ
3148 aeadctx->enckey_len = 0;
3149 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3150 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3151 & CRYPTO_TFM_REQ_MASK);
3152 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3153 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3154 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3155 CRYPTO_TFM_RES_MASK);
3156 if (ret)
3157 goto out;
3158
7c2cf1c4
HJ
3159 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3160 keylen > 3) {
2debd332
HJ
3161 keylen -= 4; /* nonce/salt is present in the last 4 bytes */
3162 memcpy(aeadctx->salt, key + keylen, 4);
3163 }
3164 if (keylen == AES_KEYSIZE_128) {
3165 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3166 } else if (keylen == AES_KEYSIZE_192) {
3167 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3168 } else if (keylen == AES_KEYSIZE_256) {
3169 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3170 } else {
3171 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3172 CRYPTO_TFM_RES_BAD_KEY_LEN);
0e93708d 3173 pr_err("GCM: Invalid key length %d\n", keylen);
2debd332
HJ
3174 ret = -EINVAL;
3175 goto out;
3176 }
3177
3178 memcpy(aeadctx->key, key, keylen);
3179 aeadctx->enckey_len = keylen;
3180 key_ctx_size = sizeof(struct _key_ctx) +
3181 ((DIV_ROUND_UP(keylen, 16)) << 4) +
3182 AEAD_H_SIZE;
3183 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3184 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3185 0, 0,
3186 key_ctx_size >> 4);
8356ea51
HJ
3187 /* Calculate the H = CIPH(K, 0 repeated 16 times).
3188 * It will go in key context
2debd332 3189 */
8356ea51
HJ
3190 cipher = crypto_alloc_cipher("aes-generic", 0, 0);
3191 if (IS_ERR(cipher)) {
2debd332
HJ
3192 aeadctx->enckey_len = 0;
3193 ret = -ENOMEM;
3194 goto out;
3195 }
8356ea51
HJ
3196
3197 ret = crypto_cipher_setkey(cipher, key, keylen);
2debd332
HJ
3198 if (ret) {
3199 aeadctx->enckey_len = 0;
3200 goto out1;
3201 }
3202 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
8356ea51 3203 crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
2debd332
HJ
3204
3205out1:
8356ea51 3206 crypto_free_cipher(cipher);
2debd332
HJ
3207out:
3208 return ret;
3209}
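The value stored as ghash_h in chcr_gcm_setkey() is the GHASH subkey H = AES_K(0^128), which is why a software AES cipher is instantiated just to encrypt one all-zero block. A standalone illustration using OpenSSL's legacy AES primitives (userspace only, not driver code):

#include <stdint.h>
#include <string.h>
#include <openssl/aes.h>

static void compute_ghash_h(uint8_t h[16], const uint8_t *key, int keybits)
{
	AES_KEY ek;
	uint8_t zero[16] = { 0 };

	AES_set_encrypt_key(key, keybits, &ek);	/* keybits: 128/192/256 */
	AES_encrypt(zero, h, &ek);		/* H = E_K(0^128) */
}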
3210
3211static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3212 unsigned int keylen)
3213{
2f47d580 3214 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
2debd332
HJ
3215 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
 3216 /* it contains both the auth and cipher keys */
3217 struct crypto_authenc_keys keys;
3218 unsigned int bs;
3219 unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3220 int err = 0, i, key_ctx_len = 0;
3221 unsigned char ck_size = 0;
3222 unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
ec1bca94 3223 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
2debd332
HJ
3224 struct algo_param param;
3225 int align;
3226 u8 *o_ptr = NULL;
3227
0e93708d
HJ
3228 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3229 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3230 & CRYPTO_TFM_REQ_MASK);
3231 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3232 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3233 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3234 & CRYPTO_TFM_RES_MASK);
3235 if (err)
3236 goto out;
3237
2debd332
HJ
3238 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3239 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3240 goto out;
3241 }
3242
3243 if (get_alg_config(&param, max_authsize)) {
3244 pr_err("chcr : Unsupported digest size\n");
3245 goto out;
3246 }
3247 if (keys.enckeylen == AES_KEYSIZE_128) {
3248 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3249 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3250 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3251 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3252 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3253 } else {
3254 pr_err("chcr : Unsupported cipher key\n");
3255 goto out;
3256 }
3257
 3258 /* Copy only the encryption key. The authkey is used to generate h(ipad)
 3259 * and h(opad), so it is not needed again; authkeylen is at most the
 3260 * hash digest size.
3261 */
3262 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3263 aeadctx->enckey_len = keys.enckeylen;
3264 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3265 aeadctx->enckey_len << 3);
3266
3267 base_hash = chcr_alloc_shash(max_authsize);
3268 if (IS_ERR(base_hash)) {
3269 pr_err("chcr : Base driver cannot be loaded\n");
0e93708d
HJ
3270 aeadctx->enckey_len = 0;
3271 return -EINVAL;
324429d7 3272 }
2debd332
HJ
3273 {
3274 SHASH_DESC_ON_STACK(shash, base_hash);
3275 shash->tfm = base_hash;
3276 shash->flags = crypto_shash_get_flags(base_hash);
3277 bs = crypto_shash_blocksize(base_hash);
3278 align = KEYCTX_ALIGN_PAD(max_authsize);
3279 o_ptr = actx->h_iopad + param.result_size + align;
3280
3281 if (keys.authkeylen > bs) {
3282 err = crypto_shash_digest(shash, keys.authkey,
3283 keys.authkeylen,
3284 o_ptr);
3285 if (err) {
3286 pr_err("chcr : Base driver cannot be loaded\n");
3287 goto out;
3288 }
3289 keys.authkeylen = max_authsize;
3290 } else
3291 memcpy(o_ptr, keys.authkey, keys.authkeylen);
3292
3293 /* Compute the ipad-digest*/
3294 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3295 memcpy(pad, o_ptr, keys.authkeylen);
3296 for (i = 0; i < bs >> 2; i++)
3297 *((unsigned int *)pad + i) ^= IPAD_DATA;
3298
3299 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3300 max_authsize))
3301 goto out;
3302 /* Compute the opad-digest */
3303 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3304 memcpy(pad, o_ptr, keys.authkeylen);
3305 for (i = 0; i < bs >> 2; i++)
3306 *((unsigned int *)pad + i) ^= OPAD_DATA;
3307
3308 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3309 goto out;
3310
3311 /* convert the ipad and opad digest to network order */
3312 chcr_change_order(actx->h_iopad, param.result_size);
3313 chcr_change_order(o_ptr, param.result_size);
3314 key_ctx_len = sizeof(struct _key_ctx) +
3315 ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
3316 (param.result_size + align) * 2;
3317 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3318 0, 1, key_ctx_len >> 4);
3319 actx->auth_mode = param.auth_mode;
3320 chcr_free_shash(base_hash);
3321
3322 return 0;
3323 }
3324out:
3325 aeadctx->enckey_len = 0;
3326 if (!IS_ERR(base_hash))
3327 chcr_free_shash(base_hash);
3328 return -EINVAL;
3329}
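/*
 * Illustrative sketch, not part of the original driver code: the authenc
 * setkey above pre-computes the HMAC inner/outer states by padding the
 * (possibly pre-hashed) auth key to the hash block size and XOR-ing it
 * with the ipad/opad bytes before hashing one block; the driver's
 * IPAD_DATA/OPAD_DATA constants apply the same 0x36/0x5c bytes one 32-bit
 * word at a time.  The helper below shows only the pad construction; the
 * function name and buffers are hypothetical.
 */
static void example_build_hmac_pads(const u8 *key, unsigned int keylen,
				    unsigned int blocksize,
				    u8 *ipad, u8 *opad)
{
	unsigned int i;

	/* key is assumed to be already reduced to <= blocksize bytes */
	memset(ipad, 0, blocksize);
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;	/* HMAC inner pad byte */
		opad[i] ^= 0x5c;	/* HMAC outer pad byte */
	}
}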
3330
3331static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3332 const u8 *key, unsigned int keylen)
3333{
3334 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3335 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3336 struct crypto_authenc_keys keys;
3337 int err;
3338 /* the key contains both the auth key and the cipher key */
3339 int key_ctx_len = 0;
3340 unsigned char ck_size = 0;
3341
3342 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3343 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3344 & CRYPTO_TFM_REQ_MASK);
3345 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3346 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3347 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3348 & CRYPTO_TFM_RES_MASK);
3349 if (err)
3350 goto out;
3351
3352 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3353 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3354 goto out;
3355 }
3356 if (keys.enckeylen == AES_KEYSIZE_128) {
3357 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3358 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3359 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3360 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3361 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3362 } else {
3363 pr_err("chcr : Unsupported cipher key\n");
3364 goto out;
3365 }
3366 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3367 aeadctx->enckey_len = keys.enckeylen;
3368 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3369 aeadctx->enckey_len << 3);
3370 key_ctx_len = sizeof(struct _key_ctx)
3371 + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
3372
3373 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3374 0, key_ctx_len >> 4);
3375 actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3376 return 0;
3377out:
3378 aeadctx->enckey_len = 0;
3379 return -EINVAL;
3380}
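/*
 * Illustrative sketch, not part of the original driver code: the hardware
 * key-context header built above encodes its length in 16-byte units, so
 * the cipher key is first rounded up to a multiple of 16 bytes.  For
 * example, a 24-byte AES-192 key occupies DIV_ROUND_UP(24, 16) << 4 = 32
 * bytes of key material, and the total is shifted right by 4 before being
 * packed into the header.  The helper name below is hypothetical.
 */
static unsigned int example_key_ctx_units(unsigned int hdr_len,
					  unsigned int enckeylen)
{
	unsigned int key_ctx_len;

	key_ctx_len = hdr_len + (DIV_ROUND_UP(enckeylen, 16) << 4);
	return key_ctx_len >> 4;	/* length field is in 16-byte units */
}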
3381static int chcr_aead_encrypt(struct aead_request *req)
3382{
3383 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3384 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3385
3386 reqctx->verify = VERIFY_HW;
3387
3388 switch (get_aead_subtype(tfm)) {
3389 case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
3390 case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
3391 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3392 create_authenc_wr);
3393 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3394 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3395 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3396 create_aead_ccm_wr);
3397 default:
3398 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3399 create_gcm_wr);
3400 }
3401}
3402
3403static int chcr_aead_decrypt(struct aead_request *req)
3404{
3405 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3406 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3407 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3408 int size;
3409
3410 if (aeadctx->mayverify == VERIFY_SW) {
3411 size = crypto_aead_maxauthsize(tfm);
3412 reqctx->verify = VERIFY_SW;
3413 } else {
3414 size = 0;
3415 reqctx->verify = VERIFY_HW;
3416 }
3417
3418 switch (get_aead_subtype(tfm)) {
3419 case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
3420 case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
3421 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3422 create_authenc_wr);
3423 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3424 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3425 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3426 create_aead_ccm_wr);
3427 default:
3428 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3429 create_gcm_wr);
3430 }
3431}
3432
3433static int chcr_aead_op(struct aead_request *req,
3434 unsigned short op_type,
3435 int size,
3436 create_wr_t create_wr_fn)
3437{
3438 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3439 struct uld_ctx *u_ctx;
3440 struct sk_buff *skb;
3441
3442 if (!a_ctx(tfm)->dev) {
3443 pr_err("chcr : %s : No crypto device.\n", __func__);
3444 return -ENXIO;
3445 }
3446 u_ctx = ULD_CTX(a_ctx(tfm));
3447 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3448 a_ctx(tfm)->tx_qidx)) {
3449 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3450 return -EBUSY;
3451 }
3452
3453 /* Form a WR from req */
3454 skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
3455 op_type);
3456
3457 if (IS_ERR(skb) || !skb)
3458 return PTR_ERR(skb);
3459
3460 skb->dev = u_ctx->lldi.ports[0];
3461 set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3462 chcr_send_wr(skb);
3463 return -EINPROGRESS;
3464}
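/*
 * Illustrative sketch, not part of the original driver code: a kernel user
 * reaches the encrypt/decrypt entry points above through the generic AEAD
 * API, e.g. by allocating "gcm(aes)" and letting the crypto core pick the
 * highest-priority implementation ("gcm-aes-chcr" when the hardware is
 * present).  chcr_aead_op() returns -EINPROGRESS, so the caller waits for
 * the completion.  Names, buffer layout and error handling below are a
 * trimmed, hypothetical example; dst must leave room for the 16-byte tag.
 */
static int example_gcm_encrypt_once(const u8 *key, unsigned int keylen,
				    u8 *iv, struct scatterlist *src,
				    struct scatterlist *dst,
				    unsigned int cryptlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 0);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);

	/* wait for the asynchronous -EINPROGRESS completion */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}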
3465static struct chcr_alg_template driver_algs[] = {
3466 /* AES-CBC */
3467 {
3468 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3469 .is_registered = 0,
3470 .alg.crypto = {
3471 .cra_name = "cbc(aes)",
3472 .cra_driver_name = "cbc-aes-chcr",
3473 .cra_blocksize = AES_BLOCK_SIZE,
3474 .cra_init = chcr_cra_init,
3475 .cra_exit = chcr_cra_exit,
3476 .cra_u.ablkcipher = {
3477 .min_keysize = AES_MIN_KEY_SIZE,
3478 .max_keysize = AES_MAX_KEY_SIZE,
3479 .ivsize = AES_BLOCK_SIZE,
3480 .setkey = chcr_aes_cbc_setkey,
3481 .encrypt = chcr_aes_encrypt,
3482 .decrypt = chcr_aes_decrypt,
3483 }
3484 }
3485 },
3486 {
3487 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3488 .is_registered = 0,
3489 .alg.crypto = {
3490 .cra_name = "xts(aes)",
3491 .cra_driver_name = "xts-aes-chcr",
3492 .cra_blocksize = AES_BLOCK_SIZE,
3493 .cra_init = chcr_cra_init,
3494 .cra_exit = NULL,
3495 .cra_u.ablkcipher = {
3496 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3497 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3498 .ivsize = AES_BLOCK_SIZE,
3499 .setkey = chcr_aes_xts_setkey,
3500 .encrypt = chcr_aes_encrypt,
3501 .decrypt = chcr_aes_decrypt,
3502 }
3503 }
3504 },
3505 {
3506 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3507 .is_registered = 0,
3508 .alg.crypto = {
3509 .cra_name = "ctr(aes)",
3510 .cra_driver_name = "ctr-aes-chcr",
3511 .cra_blocksize = 1,
3512 .cra_init = chcr_cra_init,
3513 .cra_exit = chcr_cra_exit,
3514 .cra_u.ablkcipher = {
3515 .min_keysize = AES_MIN_KEY_SIZE,
3516 .max_keysize = AES_MAX_KEY_SIZE,
3517 .ivsize = AES_BLOCK_SIZE,
3518 .setkey = chcr_aes_ctr_setkey,
3519 .encrypt = chcr_aes_encrypt,
3520 .decrypt = chcr_aes_decrypt,
3521 }
3522 }
3523 },
3524 {
3525 .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3526 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3527 .is_registered = 0,
3528 .alg.crypto = {
3529 .cra_name = "rfc3686(ctr(aes))",
3530 .cra_driver_name = "rfc3686-ctr-aes-chcr",
3531 .cra_blocksize = 1,
3532 .cra_init = chcr_rfc3686_init,
3533 .cra_exit = chcr_cra_exit,
3534 .cra_u.ablkcipher = {
3535 .min_keysize = AES_MIN_KEY_SIZE +
3536 CTR_RFC3686_NONCE_SIZE,
3537 .max_keysize = AES_MAX_KEY_SIZE +
3538 CTR_RFC3686_NONCE_SIZE,
3539 .ivsize = CTR_RFC3686_IV_SIZE,
3540 .setkey = chcr_aes_rfc3686_setkey,
3541 .encrypt = chcr_aes_encrypt,
3542 .decrypt = chcr_aes_decrypt,
3543 .geniv = "seqiv",
3544 }
3545 }
3546 },
3547 /* SHA */
3548 {
3549 .type = CRYPTO_ALG_TYPE_AHASH,
3550 .is_registered = 0,
3551 .alg.hash = {
3552 .halg.digestsize = SHA1_DIGEST_SIZE,
3553 .halg.base = {
3554 .cra_name = "sha1",
3555 .cra_driver_name = "sha1-chcr",
3556 .cra_blocksize = SHA1_BLOCK_SIZE,
3557 }
3558 }
3559 },
3560 {
3561 .type = CRYPTO_ALG_TYPE_AHASH,
3562 .is_registered = 0,
3563 .alg.hash = {
3564 .halg.digestsize = SHA256_DIGEST_SIZE,
3565 .halg.base = {
3566 .cra_name = "sha256",
3567 .cra_driver_name = "sha256-chcr",
3568 .cra_blocksize = SHA256_BLOCK_SIZE,
3569 }
3570 }
3571 },
3572 {
3573 .type = CRYPTO_ALG_TYPE_AHASH,
3574 .is_registered = 0,
3575 .alg.hash = {
3576 .halg.digestsize = SHA224_DIGEST_SIZE,
3577 .halg.base = {
3578 .cra_name = "sha224",
3579 .cra_driver_name = "sha224-chcr",
3580 .cra_blocksize = SHA224_BLOCK_SIZE,
3581 }
3582 }
3583 },
3584 {
3585 .type = CRYPTO_ALG_TYPE_AHASH,
3586 .is_registered = 0,
3587 .alg.hash = {
3588 .halg.digestsize = SHA384_DIGEST_SIZE,
3589 .halg.base = {
3590 .cra_name = "sha384",
3591 .cra_driver_name = "sha384-chcr",
3592 .cra_blocksize = SHA384_BLOCK_SIZE,
3593 }
3594 }
3595 },
3596 {
3597 .type = CRYPTO_ALG_TYPE_AHASH,
3598 .is_registered = 0,
3599 .alg.hash = {
3600 .halg.digestsize = SHA512_DIGEST_SIZE,
3601 .halg.base = {
3602 .cra_name = "sha512",
3603 .cra_driver_name = "sha512-chcr",
3604 .cra_blocksize = SHA512_BLOCK_SIZE,
3605 }
3606 }
3607 },
3608 /* HMAC */
3609 {
3610 .type = CRYPTO_ALG_TYPE_HMAC,
3611 .is_registered = 0,
3612 .alg.hash = {
3613 .halg.digestsize = SHA1_DIGEST_SIZE,
3614 .halg.base = {
3615 .cra_name = "hmac(sha1)",
3616 .cra_driver_name = "hmac-sha1-chcr",
3617 .cra_blocksize = SHA1_BLOCK_SIZE,
3618 }
3619 }
3620 },
3621 {
3622 .type = CRYPTO_ALG_TYPE_HMAC,
3623 .is_registered = 0,
3624 .alg.hash = {
3625 .halg.digestsize = SHA224_DIGEST_SIZE,
3626 .halg.base = {
3627 .cra_name = "hmac(sha224)",
3628 .cra_driver_name = "hmac-sha224-chcr",
3629 .cra_blocksize = SHA224_BLOCK_SIZE,
3630 }
3631 }
3632 },
3633 {
3634 .type = CRYPTO_ALG_TYPE_HMAC,
3635 .is_registered = 0,
3636 .alg.hash = {
3637 .halg.digestsize = SHA256_DIGEST_SIZE,
3638 .halg.base = {
3639 .cra_name = "hmac(sha256)",
3640 .cra_driver_name = "hmac-sha256-chcr",
3641 .cra_blocksize = SHA256_BLOCK_SIZE,
3642 }
3643 }
3644 },
3645 {
3646 .type = CRYPTO_ALG_TYPE_HMAC,
3647 .is_registered = 0,
3648 .alg.hash = {
3649 .halg.digestsize = SHA384_DIGEST_SIZE,
3650 .halg.base = {
3651 .cra_name = "hmac(sha384)",
3652 .cra_driver_name = "hmac-sha384-chcr",
3653 .cra_blocksize = SHA384_BLOCK_SIZE,
3654 }
3655 }
3656 },
3657 {
3658 .type = CRYPTO_ALG_TYPE_HMAC,
3659 .is_registered = 0,
3660 .alg.hash = {
3661 .halg.digestsize = SHA512_DIGEST_SIZE,
3662 .halg.base = {
3663 .cra_name = "hmac(sha512)",
3664 .cra_driver_name = "hmac-sha512-chcr",
3665 .cra_blocksize = SHA512_BLOCK_SIZE,
3666 }
3667 }
3668 },
3669 /* Add AEAD Algorithms */
3670 {
3671 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3672 .is_registered = 0,
3673 .alg.aead = {
3674 .base = {
3675 .cra_name = "gcm(aes)",
3676 .cra_driver_name = "gcm-aes-chcr",
3677 .cra_blocksize = 1,
3678 .cra_priority = CHCR_AEAD_PRIORITY,
3679 .cra_ctxsize = sizeof(struct chcr_context) +
3680 sizeof(struct chcr_aead_ctx) +
3681 sizeof(struct chcr_gcm_ctx),
3682 },
3683 .ivsize = GCM_AES_IV_SIZE,
3684 .maxauthsize = GHASH_DIGEST_SIZE,
3685 .setkey = chcr_gcm_setkey,
3686 .setauthsize = chcr_gcm_setauthsize,
3687 }
3688 },
3689 {
3690 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3691 .is_registered = 0,
3692 .alg.aead = {
3693 .base = {
3694 .cra_name = "rfc4106(gcm(aes))",
3695 .cra_driver_name = "rfc4106-gcm-aes-chcr",
3696 .cra_blocksize = 1,
3697 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3698 .cra_ctxsize = sizeof(struct chcr_context) +
3699 sizeof(struct chcr_aead_ctx) +
3700 sizeof(struct chcr_gcm_ctx),
3701
3702 },
3703 .ivsize = GCM_RFC4106_IV_SIZE,
3704 .maxauthsize = GHASH_DIGEST_SIZE,
3705 .setkey = chcr_gcm_setkey,
3706 .setauthsize = chcr_4106_4309_setauthsize,
3707 }
3708 },
3709 {
3710 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3711 .is_registered = 0,
3712 .alg.aead = {
3713 .base = {
3714 .cra_name = "ccm(aes)",
3715 .cra_driver_name = "ccm-aes-chcr",
3716 .cra_blocksize = 1,
3717 .cra_priority = CHCR_AEAD_PRIORITY,
3718 .cra_ctxsize = sizeof(struct chcr_context) +
3719 sizeof(struct chcr_aead_ctx),
3720
3721 },
3722 .ivsize = AES_BLOCK_SIZE,
3723 .maxauthsize = GHASH_DIGEST_SIZE,
3724 .setkey = chcr_aead_ccm_setkey,
3725 .setauthsize = chcr_ccm_setauthsize,
3726 }
3727 },
3728 {
3729 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3730 .is_registered = 0,
3731 .alg.aead = {
3732 .base = {
3733 .cra_name = "rfc4309(ccm(aes))",
3734 .cra_driver_name = "rfc4309-ccm-aes-chcr",
3735 .cra_blocksize = 1,
3736 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3737 .cra_ctxsize = sizeof(struct chcr_context) +
3738 sizeof(struct chcr_aead_ctx),
3739
3740 },
3741 .ivsize = 8,
3742 .maxauthsize = GHASH_DIGEST_SIZE,
3743 .setkey = chcr_aead_rfc4309_setkey,
3744 .setauthsize = chcr_4106_4309_setauthsize,
3745 }
3746 },
3747 {
3748 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3749 .is_registered = 0,
3750 .alg.aead = {
3751 .base = {
3752 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3753 .cra_driver_name =
3754 "authenc-hmac-sha1-cbc-aes-chcr",
3755 .cra_blocksize = AES_BLOCK_SIZE,
3756 .cra_priority = CHCR_AEAD_PRIORITY,
3757 .cra_ctxsize = sizeof(struct chcr_context) +
3758 sizeof(struct chcr_aead_ctx) +
3759 sizeof(struct chcr_authenc_ctx),
3760
3761 },
3762 .ivsize = AES_BLOCK_SIZE,
3763 .maxauthsize = SHA1_DIGEST_SIZE,
3764 .setkey = chcr_authenc_setkey,
3765 .setauthsize = chcr_authenc_setauthsize,
3766 }
3767 },
3768 {
3769 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3770 .is_registered = 0,
3771 .alg.aead = {
3772 .base = {
3773
3774 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3775 .cra_driver_name =
3776 "authenc-hmac-sha256-cbc-aes-chcr",
3777 .cra_blocksize = AES_BLOCK_SIZE,
3778 .cra_priority = CHCR_AEAD_PRIORITY,
3779 .cra_ctxsize = sizeof(struct chcr_context) +
3780 sizeof(struct chcr_aead_ctx) +
3781 sizeof(struct chcr_authenc_ctx),
3782
3783 },
3784 .ivsize = AES_BLOCK_SIZE,
3785 .maxauthsize = SHA256_DIGEST_SIZE,
3786 .setkey = chcr_authenc_setkey,
3787 .setauthsize = chcr_authenc_setauthsize,
3788 }
3789 },
3790 {
3791 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3792 .is_registered = 0,
3793 .alg.aead = {
3794 .base = {
3795 .cra_name = "authenc(hmac(sha224),cbc(aes))",
3796 .cra_driver_name =
3797 "authenc-hmac-sha224-cbc-aes-chcr",
3798 .cra_blocksize = AES_BLOCK_SIZE,
3799 .cra_priority = CHCR_AEAD_PRIORITY,
3800 .cra_ctxsize = sizeof(struct chcr_context) +
3801 sizeof(struct chcr_aead_ctx) +
3802 sizeof(struct chcr_authenc_ctx),
3803 },
3804 .ivsize = AES_BLOCK_SIZE,
3805 .maxauthsize = SHA224_DIGEST_SIZE,
3806 .setkey = chcr_authenc_setkey,
3807 .setauthsize = chcr_authenc_setauthsize,
3808 }
3809 },
3810 {
3811 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3812 .is_registered = 0,
3813 .alg.aead = {
3814 .base = {
3815 .cra_name = "authenc(hmac(sha384),cbc(aes))",
3816 .cra_driver_name =
3817 "authenc-hmac-sha384-cbc-aes-chcr",
3818 .cra_blocksize = AES_BLOCK_SIZE,
3819 .cra_priority = CHCR_AEAD_PRIORITY,
3820 .cra_ctxsize = sizeof(struct chcr_context) +
3821 sizeof(struct chcr_aead_ctx) +
3822 sizeof(struct chcr_authenc_ctx),
3823
3824 },
3825 .ivsize = AES_BLOCK_SIZE,
3826 .maxauthsize = SHA384_DIGEST_SIZE,
3827 .setkey = chcr_authenc_setkey,
3828 .setauthsize = chcr_authenc_setauthsize,
3829 }
3830 },
3831 {
3832 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3833 .is_registered = 0,
3834 .alg.aead = {
3835 .base = {
3836 .cra_name = "authenc(hmac(sha512),cbc(aes))",
3837 .cra_driver_name =
3838 "authenc-hmac-sha512-cbc-aes-chcr",
3839 .cra_blocksize = AES_BLOCK_SIZE,
3840 .cra_priority = CHCR_AEAD_PRIORITY,
3841 .cra_ctxsize = sizeof(struct chcr_context) +
3842 sizeof(struct chcr_aead_ctx) +
3843 sizeof(struct chcr_authenc_ctx),
3844
3845 },
3846 .ivsize = AES_BLOCK_SIZE,
3847 .maxauthsize = SHA512_DIGEST_SIZE,
3848 .setkey = chcr_authenc_setkey,
3849 .setauthsize = chcr_authenc_setauthsize,
3850 }
3851 },
3852 {
3853 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
3854 .is_registered = 0,
3855 .alg.aead = {
3856 .base = {
3857 .cra_name = "authenc(digest_null,cbc(aes))",
3858 .cra_driver_name =
3859 "authenc-digest_null-cbc-aes-chcr",
3860 .cra_blocksize = AES_BLOCK_SIZE,
3861 .cra_priority = CHCR_AEAD_PRIORITY,
3862 .cra_ctxsize = sizeof(struct chcr_context) +
3863 sizeof(struct chcr_aead_ctx) +
3864 sizeof(struct chcr_authenc_ctx),
3865
3866 },
3867 .ivsize = AES_BLOCK_SIZE,
3868 .maxauthsize = 0,
3869 .setkey = chcr_aead_digest_null_setkey,
3870 .setauthsize = chcr_authenc_null_setauthsize,
3871 }
3872 },
3873};
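/*
 * Illustrative sketch, not part of the original driver code: each template
 * above pairs a generic algorithm name ("cbc(aes)", "gcm(aes)", ...) with a
 * chcr-specific driver name, and chcr_register_alg() below fills in the
 * priority.  A caller asking for the generic name gets whichever registered
 * implementation has the highest priority, while the driver name pins the
 * Chelsio implementation explicitly.  The helper name below is
 * hypothetical.
 */
static bool example_chcr_cbc_registered(void)
{
	/* any implementation of the generic name */
	if (!crypto_has_alg("cbc(aes)", 0, 0))
		return false;

	/* the Chelsio-backed instance specifically, by driver name */
	return crypto_has_alg("cbc-aes-chcr", 0, 0);
}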
3874
3875/*
3876 * chcr_unregister_alg - Deregister crypto algorithms with the
3877 * kernel framework.
3878 */
3879static int chcr_unregister_alg(void)
3880{
3881 int i;
3882
3883 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3884 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
3885 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3886 if (driver_algs[i].is_registered)
3887 crypto_unregister_alg(
3888 &driver_algs[i].alg.crypto);
3889 break;
3890 case CRYPTO_ALG_TYPE_AEAD:
3891 if (driver_algs[i].is_registered)
3892 crypto_unregister_aead(
3893 &driver_algs[i].alg.aead);
3894 break;
3895 case CRYPTO_ALG_TYPE_AHASH:
3896 if (driver_algs[i].is_registered)
3897 crypto_unregister_ahash(
3898 &driver_algs[i].alg.hash);
3899 break;
3900 }
3901 driver_algs[i].is_registered = 0;
3902 }
3903 return 0;
3904}
3905
3906#define SZ_AHASH_CTX sizeof(struct chcr_context)
3907#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
3908#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
3909#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
3910
3911/*
3912 * chcr_register_alg - Register crypto algorithms with kernel framework.
3913 */
3914static int chcr_register_alg(void)
3915{
3916 struct crypto_alg ai;
3917 struct ahash_alg *a_hash;
3918 int err = 0, i;
3919 char *name = NULL;
3920
3921 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3922 if (driver_algs[i].is_registered)
3923 continue;
3924 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
3925 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3926 driver_algs[i].alg.crypto.cra_priority =
3927 CHCR_CRA_PRIORITY;
3928 driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
3929 driver_algs[i].alg.crypto.cra_flags =
3930 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
3931 CRYPTO_ALG_NEED_FALLBACK;
3932 driver_algs[i].alg.crypto.cra_ctxsize =
3933 sizeof(struct chcr_context) +
3934 sizeof(struct ablk_ctx);
3935 driver_algs[i].alg.crypto.cra_alignmask = 0;
3936 driver_algs[i].alg.crypto.cra_type =
3937 &crypto_ablkcipher_type;
3938 err = crypto_register_alg(&driver_algs[i].alg.crypto);
3939 name = driver_algs[i].alg.crypto.cra_driver_name;
3940 break;
3941 case CRYPTO_ALG_TYPE_AEAD:
3942 driver_algs[i].alg.aead.base.cra_flags =
3943 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
3944 CRYPTO_ALG_NEED_FALLBACK;
3945 driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
3946 driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
3947 driver_algs[i].alg.aead.init = chcr_aead_cra_init;
3948 driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
3949 driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
3950 err = crypto_register_aead(&driver_algs[i].alg.aead);
3951 name = driver_algs[i].alg.aead.base.cra_driver_name;
3952 break;
3953 case CRYPTO_ALG_TYPE_AHASH:
3954 a_hash = &driver_algs[i].alg.hash;
3955 a_hash->update = chcr_ahash_update;
3956 a_hash->final = chcr_ahash_final;
3957 a_hash->finup = chcr_ahash_finup;
3958 a_hash->digest = chcr_ahash_digest;
3959 a_hash->export = chcr_ahash_export;
3960 a_hash->import = chcr_ahash_import;
3961 a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
3962 a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
3963 a_hash->halg.base.cra_module = THIS_MODULE;
3964 a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
3965 a_hash->halg.base.cra_alignmask = 0;
3966 a_hash->halg.base.cra_exit = NULL;
3967 a_hash->halg.base.cra_type = &crypto_ahash_type;
3968
3969 if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
3970 a_hash->halg.base.cra_init = chcr_hmac_cra_init;
3971 a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
3972 a_hash->init = chcr_hmac_init;
3973 a_hash->setkey = chcr_ahash_setkey;
3974 a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
3975 } else {
3976 a_hash->init = chcr_sha_init;
3977 a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
3978 a_hash->halg.base.cra_init = chcr_sha_cra_init;
3979 }
3980 err = crypto_register_ahash(&driver_algs[i].alg.hash);
3981 ai = driver_algs[i].alg.hash.halg.base;
3982 name = ai.cra_driver_name;
3983 break;
3984 }
3985 if (err) {
3986 pr_err("chcr : %s : Algorithm registration failed\n",
3987 name);
3988 goto register_err;
3989 } else {
3990 driver_algs[i].is_registered = 1;
3991 }
3992 }
3993 return 0;
3994
3995register_err:
3996 chcr_unregister_alg();
3997 return err;
3998}
3999
4000/*
4001 * start_crypto - Register the crypto algorithms.
4002 * This should be called once when the first device comes up. After this
4003 * the kernel will start calling the driver APIs for crypto operations.
4004 */
4005int start_crypto(void)
4006{
4007 return chcr_register_alg();
4008}
4009
4010/*
4011 * stop_crypto - Deregister all the crypto algorithms with the kernel.
4012 * This should be called once when the last device goes down. After this
4013 * the kernel will not call the driver APIs for crypto operations.
4014 */
4015int stop_crypto(void)
4016{
4017 chcr_unregister_alg();
4018 return 0;
4019}