/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

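/*
 * WR space, in bytes, consumed by a source ULPTX_SGL (sgl_ent_len) or a
 * destination PHYS_DSGL (dsgl_ent_len) holding the indexed number of
 * scatter-gather entries; used below when deciding how much of a request
 * fits in the space left in one work request (WR).
 */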
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};

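/* AES key schedule round constants (Rcon), used by get_aes_decrypt_key(). */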
static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}

static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}

static int chcr_inc_wrcount(struct chcr_dev *dev)
{
	if (dev->state == CHCR_DETACH)
		return 1;
	atomic_inc(&dev->inflight);
	return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);
}

static inline int chcr_handle_aead_resp(struct aead_request *req,
					unsigned char *input,
					int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_dev *dev = a_ctx(tfm)->dev;

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);

	return err;
}

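/*
 * Run the AES forward key schedule and emit the final Nk round-key words,
 * newest first.  The result is the "reverse round key" the hardware needs
 * to start AES decryption from the last round; see generate_copy_rrkey().
 */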
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = get_unaligned_be32(&key[i * 4]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
		j--;
		if (j < 0)
			j += nk;
	}
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}

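/*
 * Hash one padded block (the HMAC ipad or opad) with a software shash and
 * export the raw internal state.  The exported words are later loaded into
 * the hardware key context as the partial hash it continues from; see
 * create_hash_wr().
 */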
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}

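/*
 * Byte-swap the exported hash state into the big-endian word order the
 * hardware expects (64-bit words for SHA-384/512, 32-bit words otherwise).
 */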
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}

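/*
 * The dsgl_walk_* helpers build the destination CPL_RX_PHYS_DSGL list and
 * the ulptx_walk_* helpers build the source ULPTX_SGL list; together they
 * describe a request's DMA buffers to the hardware, honouring an initial
 * 'skip' offset and the per-entry size caps CHCR_DST/SRC_SG_SIZE.
 */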
static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}

static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}

static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.skcipher);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}

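/*
 * Work out how many payload bytes fit in the WR space that is left,
 * charging sgl_ent_len[]/dsgl_ent_len[] overhead per source/destination
 * entry.  chcr_hash_ent_in_wr() walks only the source list (digests have
 * no destination SGL); chcr_sg_ent_in_wr() walks source and destination
 * together and returns the smaller of the two byte counts.
 */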
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}

static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}

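/*
 * Hand the request to the pre-allocated software skcipher when the
 * hardware cannot make progress, e.g. a CTR counter that would wrap
 * within the request or key/length combinations the engine rejects.
 */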
static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
				struct skcipher_request *req,
				u8 *iv,
				unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int err;

	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
				   req->cryptlen, iv);

	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
			crypto_skcipher_encrypt(&reqctx->fallback_req);

	return err;
}

static inline int get_qidxs(struct crypto_async_request *req,
			    unsigned int *txqidx, unsigned int *rxqidx)
{
	struct crypto_tfm *tfm = req->tfm;
	int ret = 0;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
	{
		struct aead_request *aead_req =
			container_of(req, struct aead_request, base);
		struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_SKCIPHER:
	{
		struct skcipher_request *sk_req =
			container_of(req, struct skcipher_request, base);
		struct chcr_skcipher_req_ctx *reqctx =
			skcipher_request_ctx(sk_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_AHASH:
	{
		struct ahash_request *ahash_req =
			container_of(req, struct ahash_request, base);
		struct chcr_ahash_req_ctx *reqctx =
			ahash_request_ctx(ahash_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	default:
		ret = -EINVAL;
		/* should never get here */
		BUG();
		break;
	}
	return ret;
}

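/*
 * Fill the FW_CRYPTO_LOOKASIDE_WR and ULPTX header fields common to all
 * cipher/hash/AEAD work requests: WR length, the completion cookie (the
 * request pointer itself), and the tx/rx channel and queue the request
 * is bound to.
 */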
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	unsigned int tx_channel_id, rx_channel_id;
	unsigned int txqidx = 0, rxqidx = 0;
	unsigned int qid, fid;

	get_qidxs(req, &txqidx, &rxqidx);
	qid = u_ctx->lldi.rxq_ids[rxqidx];
	fid = u_ctx->lldi.rxq_ids[0];
	tx_channel_id = txqidx / ctx->txq_perchan;
	rx_channel_id = rxqidx / ctx->rxq_perchan;

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
							    !!lcb, txqidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));
	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@wrparam: holds the cipher request, the ingress qid where the
 *		  response should be received, and the number of bytes
 *		  this WR covers
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_skcipher_req_ctx *reqctx =
		skcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							  ablkctx->ciph_mode,
							  0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
							  0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher,
				  cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}

static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
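/*
 * Advance the 128-bit big-endian counter block in srciv by 'add' and store
 * the result in dstiv, propagating the carry one 32-bit word at a time.
 */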
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}

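/*
 * Clamp 'bytes' so the low 32 bits of the CTR counter in 'iv' do not wrap
 * within a single work request; any remainder is handled by a follow-up
 * request (or the software fallback).
 */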
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	/* number of blocks that can be processed without overflow */
	c = (u64)temp + 1;
	if ((bytes / AES_BLOCK_SIZE) >= c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}

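/*
 * Recompute the XTS tweak for the point reached so far: encrypt the
 * original IV, then apply one gf128 doubling per AES block already
 * processed (eight at a time via gf128mul_x8_ble()).  Unless this is the
 * final IV, the result is decrypted back so the next WR starts from a
 * plain IV that the hardware will re-encrypt itself.
 */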
static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_aes_ctx aes;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	/* For a 192 bit key remove the padded zeroes which were
	 * added in chcr_xts_setkey
	 */
	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
		ret = aes_expandkey(&aes, key, keylen - 8);
	else
		ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;
	aes_encrypt(&aes, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		aes_decrypt(&aes, iv, iv);

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}

static int chcr_update_cipher_iv(struct skcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* updated before sending the last WR */
			memcpy(iv, req->iv, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}

/* We need a separate function for the final IV because in rfc3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct skcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
						     AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
		if (!reqctx->partial_req)
			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
		else
			ret = chcr_update_tweak(req, iv, 1);
	} else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* already updated for decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}

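/*
 * Completion handler for cipher work requests.  Either the whole request
 * has now been processed, in which case the final IV is derived and the
 * request completed, or the IV is rolled forward and the next chunk is
 * sent as a fresh WR (falling back to software if nothing more fits).
 */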
static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct chcr_context *ctx = c_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);
	struct cipher_wr_param wrparam;
	struct sk_buff *skb;
	int bytes;

	if (err)
		goto unmap;
	if (req->cryptlen == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->cryptlen - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		memcpy(req->iv, reqctx->init_iv, IV);
		atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("%s: Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
	return err;
}

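/*
 * Validate the request, DMA-map it, decide whether the payload can ride in
 * the WR as immediate data or needs SGLs, set up the per-mode IV, and build
 * the first work request.  Requests larger than one WR are continued from
 * chcr_handle_cipher_resp().
 */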
static int process_cipher(struct skcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct adapter *adap = padap(c_ctx(tfm)->dev);
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;
	int subtype;

	reqctx->processed = 0;
	reqctx->partial_req = 0;
	if (!req->iv)
		goto error;
	subtype = get_cryptoalg_subtype(tfm);
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->cryptlen == 0) ||
	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->cryptlen, ivsize);
		goto error;
	}

	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (err)
		goto error;
	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					    AES_MIN_KEY_SIZE +
					    sizeof(struct cpl_rx_phys_dsgl) +
					/* Min dsgl size */
					    32))) {
		/* Can be sent as Imm */
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->cryptlen,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->cryptlen;

	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->cryptlen;
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->iv, bytes);
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
		memcpy(reqctx->init_iv, reqctx->iv, IV);

	} else {
		memcpy(reqctx->iv, req->iv, IV);
		memcpy(reqctx->init_iv, req->iv, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
fallback:
		atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
					   subtype ==
					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
					   reqctx->iv : req->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;
	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}

7cea6d3e 1366static int chcr_aes_encrypt(struct skcipher_request *req)
b8fd1f41 1367{
7cea6d3e 1368 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
bed44d0c 1369 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
fef4912b 1370 struct chcr_dev *dev = c_ctx(tfm)->dev;
b8fd1f41 1371 struct sk_buff *skb = NULL;
567be3a5 1372 int err;
2f47d580 1373 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
567be3a5
AS
1374 struct chcr_context *ctx = c_ctx(tfm);
1375 unsigned int cpu;
1376
1377 cpu = get_cpu();
1378 reqctx->txqidx = cpu % ctx->ntxq;
1379 reqctx->rxqidx = cpu % ctx->nrxq;
1380 put_cpu();
324429d7 1381
fef4912b
HJ
1382 err = chcr_inc_wrcount(dev);
1383 if (err)
1384 return -ENXIO;
324429d7 1385 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
567be3a5
AS
1386 reqctx->txqidx) &&
1387 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
fef4912b
HJ
1388 err = -ENOSPC;
1389 goto error;
324429d7
HS
1390 }
1391
567be3a5 1392 err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
2f47d580 1393 &skb, CHCR_ENCRYPT_OP);
b8fd1f41
HJ
1394 if (err || !skb)
1395 return err;
324429d7 1396 skb->dev = u_ctx->lldi.ports[0];
567be3a5 1397 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
324429d7 1398 chcr_send_wr(skb);
1c502e2e
AS
1399 if (get_cryptoalg_subtype(tfm) ==
1400 CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1401 CRYPTO_TFM_REQ_MAY_SLEEP ) {
bed44d0c 1402 reqctx->partial_req = 1;
1c502e2e
AS
1403 wait_for_completion(&ctx->cbc_aes_aio_done);
1404 }
567be3a5 1405 return -EINPROGRESS;
fef4912b
HJ
1406error:
1407 chcr_dec_wrcount(dev);
1408 return err;
324429d7
HS
1409}
1410
7cea6d3e 1411static int chcr_aes_decrypt(struct skcipher_request *req)
324429d7 1412{
7cea6d3e 1413 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
567be3a5 1414 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2f47d580 1415 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
fef4912b 1416 struct chcr_dev *dev = c_ctx(tfm)->dev;
b8fd1f41 1417 struct sk_buff *skb = NULL;
567be3a5
AS
1418 int err;
1419 struct chcr_context *ctx = c_ctx(tfm);
1420 unsigned int cpu;
1421
1422 cpu = get_cpu();
1423 reqctx->txqidx = cpu % ctx->ntxq;
1424 reqctx->rxqidx = cpu % ctx->nrxq;
1425 put_cpu();
324429d7 1426
fef4912b
HJ
1427 err = chcr_inc_wrcount(dev);
1428 if (err)
1429 return -ENXIO;
1430
324429d7 1431 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
567be3a5
AS
1432 reqctx->txqidx) &&
1433 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
6faa0f57 1434 return -ENOSPC;
567be3a5 1435 err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
fc6176a2 1436 &skb, CHCR_DECRYPT_OP);
b8fd1f41
HJ
1437 if (err || !skb)
1438 return err;
324429d7 1439 skb->dev = u_ctx->lldi.ports[0];
567be3a5 1440 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
324429d7 1441 chcr_send_wr(skb);
567be3a5 1442 return -EINPROGRESS;
324429d7 1443}
324429d7
HS
1444static int chcr_device_init(struct chcr_context *ctx)
1445{
14c19b17 1446 struct uld_ctx *u_ctx = NULL;
567be3a5
AS
1447 int txq_perchan, ntxq;
1448 int err = 0, rxq_perchan;
324429d7 1449
324429d7 1450 if (!ctx->dev) {
14c19b17
HJ
1451 u_ctx = assign_chcr_device();
1452 if (!u_ctx) {
055be686 1453 err = -ENXIO;
324429d7
HS
1454 pr_err("chcr device assignment fails\n");
1455 goto out;
1456 }
fef4912b 1457 ctx->dev = &u_ctx->dev;
a1c6fd43 1458 ntxq = u_ctx->lldi.ntxq;
324429d7 1459 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
72a56ca9 1460 txq_perchan = ntxq / u_ctx->lldi.nchan;
567be3a5
AS
1461 ctx->ntxq = ntxq;
1462 ctx->nrxq = u_ctx->lldi.nrxq;
1463 ctx->rxq_perchan = rxq_perchan;
1464 ctx->txq_perchan = txq_perchan;
324429d7
HS
1465 }
1466out:
1467 return err;
1468}
1469
7cea6d3e 1470static int chcr_init_tfm(struct crypto_skcipher *tfm)
324429d7 1471{
7cea6d3e
AB
1472 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1473 struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
b8fd1f41
HJ
1474 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1475
d8c6d188 1476 ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
28874f26 1477 CRYPTO_ALG_NEED_FALLBACK);
b8fd1f41 1478 if (IS_ERR(ablkctx->sw_cipher)) {
7cea6d3e 1479 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
b8fd1f41
HJ
1480 return PTR_ERR(ablkctx->sw_cipher);
1481 }
1c502e2e 1482 init_completion(&ctx->cbc_aes_aio_done);
d8c6d188
AB
1483 crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1484 crypto_skcipher_reqsize(ablkctx->sw_cipher));
7cea6d3e
AB
1485
1486 return chcr_device_init(ctx);
324429d7
HS
1487}
1488
7cea6d3e 1489static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
b8fd1f41 1490{
7cea6d3e
AB
1491 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1492 struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
b8fd1f41
HJ
1493 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1494
1495 /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
1496 * cannot be used as fallback in chcr_handle_cipher_response
1497 */
d8c6d188 1498 ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
28874f26 1499 CRYPTO_ALG_NEED_FALLBACK);
b8fd1f41 1500 if (IS_ERR(ablkctx->sw_cipher)) {
7cea6d3e 1501 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
b8fd1f41
HJ
1502 return PTR_ERR(ablkctx->sw_cipher);
1503 }
d8c6d188
AB
1504 crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1505 crypto_skcipher_reqsize(ablkctx->sw_cipher));
7cea6d3e 1506 return chcr_device_init(ctx);
b8fd1f41
HJ
1507}
1508
1509
7cea6d3e 1510static void chcr_exit_tfm(struct crypto_skcipher *tfm)
b8fd1f41 1511{
7cea6d3e 1512 struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
b8fd1f41
HJ
1513 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1514
d8c6d188 1515 crypto_free_skcipher(ablkctx->sw_cipher);
b8fd1f41
HJ
1516}
1517
324429d7
HS
1518static int get_alg_config(struct algo_param *params,
1519 unsigned int auth_size)
1520{
1521 switch (auth_size) {
1522 case SHA1_DIGEST_SIZE:
1523 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1524 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1525 params->result_size = SHA1_DIGEST_SIZE;
1526 break;
1527 case SHA224_DIGEST_SIZE:
1528 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1529 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1530 params->result_size = SHA256_DIGEST_SIZE;
1531 break;
1532 case SHA256_DIGEST_SIZE:
1533 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1534 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1535 params->result_size = SHA256_DIGEST_SIZE;
1536 break;
1537 case SHA384_DIGEST_SIZE:
1538 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1539 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1540 params->result_size = SHA512_DIGEST_SIZE;
1541 break;
1542 case SHA512_DIGEST_SIZE:
1543 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1544 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1545 params->result_size = SHA512_DIGEST_SIZE;
1546 break;
1547 default:
1548 pr_err("chcr : ERROR, unsupported digest size\n");
1549 return -EINVAL;
1550 }
1551 return 0;
1552}
1553
e7922729 1554static inline void chcr_free_shash(struct crypto_shash *base_hash)
324429d7 1555{
e7922729 1556 crypto_free_shash(base_hash);
324429d7
HS
1557}
1558
1559/**
358961d1 1560 * create_hash_wr - Create hash work request
324429d7
HS
1561 * @req - Cipher req base
1562 */
358961d1 1563static struct sk_buff *create_hash_wr(struct ahash_request *req,
2debd332 1564 struct hash_wr_param *param)
324429d7
HS
1565{
1566 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;
	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;

	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);

	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					     param->alg_prm.mk_size, 0,
					     param->opad_needed,
					     ((param->kctx_len +
					      sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the maximum WR size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}

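/*
 * chcr_ahash_update - queue a partial hash update to the hardware.
 * Only block-size multiples are sent to the adapter; any trailing
 * remainder is copied into the request buffer (reqbfr) and carried
 * over to the next ->update()/->final() call.
 */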
static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;
	/* Detach state for CHCR means lldi or padap is freed. Increasing
	 * the inflight count for the dev guarantees that lldi and padap
	 * remain valid.
	 */
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
						req_ctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		error = -ENOSPC;
		goto err;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
				     HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len > req->nbytes)
		params.sg_len = req->nbytes;
	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
			req_ctx->reqlen;
	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	req_ctx->hctx_wr.srcsg = req->src;

	params.hash_size = params.alg_prm.result_size;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}

	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}

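/*
 * create_last_hash_block - build the final MD/SHA padding block: a
 * 0x80 byte, zero fill, and the 64-bit message bit length (scmd1 << 3)
 * in the last 8 bytes of the block.
 */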
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}

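/*
 * chcr_ahash_final - flush any buffered remainder and request the
 * final digest. If nothing is buffered, a padding block is generated
 * locally so the hardware still receives a complete last block.
 */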
static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	chcr_init_hctx_per_wr(req_ctx);
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.opad_needed = 1;
		params.kctx_len *= 2;
	} else {
		params.opad_needed = 0;
	}

	req_ctx->hctx_wr.result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.srcsg = req->src;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;

	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	req_ctx->reqlen = 0;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
err:
	chcr_dec_wrcount(dev);
	return error;
}

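/*
 * chcr_ahash_finup - hash the remaining data and return the digest.
 * Falls back to an intermediate (non-final) work request when the
 * whole request does not fit into a single WR.
 */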
static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
						req_ctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		error = -ENOSPC;
		goto err;
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}

	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
				    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
				- req_ctx->reqlen;
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
				params.sg_len;
	}
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	}
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->reqlen = 0;
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}

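/*
 * chcr_ahash_digest - one-shot init + update + final. If the source
 * scatterlist does not fit into one work request, only a block-aligned
 * prefix is sent and the rest is driven by chcr_ahash_continue().
 */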
static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
						req_ctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		error = -ENOSPC;
		goto err;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
				HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
	} else {
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.last = 1;
		params.more = 0;
		params.scmd1 = req->nbytes + req_ctx->data_len;

	}
	params.bfr_len = 0;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}

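/*
 * chcr_ahash_continue - issue the next work request for a hash that
 * spans multiple WRs, resuming from the hctx_wr->srcsg/src_ofst walk
 * state. Called from the completion path until the whole request has
 * been processed.
 */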
static int chcr_ahash_continue(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(rtfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
					    HASH_SPACE_LEFT(params.kctx_len),
					    hctx_wr->src_ofst);
	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
		params.sg_len = req->nbytes - hctx_wr->processed;
	if (!hctx_wr->result ||
	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = reqctx->data_len + params.sg_len;
	}
	params.bfr_len = 0;
	reqctx->data_len += params.sg_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	hctx_wr->processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return 0;
err:
	return error;
}

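/*
 * chcr_ahash_resp handling: the hardware returns either the final
 * digest or an intermediate partial hash in the CPL_FW6_PLD payload.
 * The handler below copies it out and either kicks off the next WR
 * via chcr_ahash_continue() or completes the crypto request.
 */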
static inline void chcr_handle_ahash_resp(struct ahash_request *req,
					  unsigned char *input,
					  int err)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	int digestsize, updated_digestsize;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_dev *dev = h_ctx(tfm)->dev;

	if (input == NULL)
		goto out;
	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;

	if (hctx_wr->dma_addr) {
		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
				 hctx_wr->dma_len, DMA_TO_DEVICE);
		hctx_wr->dma_addr = 0;
	}
	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
				 req->nbytes)) {
		if (hctx_wr->result == 1) {
			hctx_wr->result = 0;
			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(reqctx->partial_hash,
			       input + sizeof(struct cpl_fw6_pld),
			       updated_digestsize);

		}
		goto unmap;
	}
	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
	       updated_digestsize);

	err = chcr_ahash_continue(req);
	if (err)
		goto unmap;
	return;
unmap:
	if (hctx_wr->is_sg_map)
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);

out:
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
}

/*
 * chcr_handle_resp - handle a completion from the adapter: dispatch to
 * the per-type response handler, which unmaps the DMA buffers
 * associated with the request and completes it.
 * @req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
		break;

	case CRYPTO_ALG_TYPE_SKCIPHER:
		chcr_handle_cipher_resp(skcipher_request_cast(req),
					input, err);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
	}
	atomic_inc(&adap->chcr_stats.complete);
	return err;
}

static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(state);
	return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->reqlen = state->reqlen;
	req_ctx->data_len = state->data_len;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(req_ctx);
	return 0;
}

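/*
 * chcr_ahash_setkey - precompute the HMAC ipad/opad partial hashes.
 * Keys longer than the block size are first digested down, per RFC
 * 2104; the partial hashes are then reordered with chcr_change_order()
 * into the layout the hardware expects in the key context.
 */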
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

	/* Use the key to calculate the ipad and opad. The ipad will be
	 * sent with the first request's data and the opad with the final
	 * hash result; they are kept in hmacctx->ipad and hmacctx->opad.
	 */
	shash->tfm = hmacctx->base_hash;
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}

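/*
 * chcr_aes_xts_setkey - install an XTS key pair. XTS keys arrive as
 * two concatenated AES keys; each half must sit on a 16-byte boundary
 * in the key context, so a 48-byte (2 x 24 byte) key is zero-padded
 * out to 2 x 32 bytes before being written.
 */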
static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
			       unsigned int key_len)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned short context_size = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
	if (err)
		goto badkey_err;

	memcpy(ablkctx->key, key, key_len);
	ablkctx->enckey_len = key_len;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
	/* Both XTS keys must be aligned to a 16-byte boundary by padding
	 * with zeros, so each half of a 24-byte key pair gets 8 zero
	 * bytes of padding.
	 */
	if (key_len == 48) {
		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
				+ 16) >> 4;
		memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
		memset(ablkctx->key + 24, 0, 8);
		memset(ablkctx->key + 56, 0, 8);
		ablkctx->enckey_len = 64;
		ablkctx->key_ctx_hdr =
			FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
					 CHCR_KEYCTX_NO_KEY, 1,
					 0, context_size);
	} else {
		ablkctx->key_ctx_hdr =
			FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
					 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
					 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
					 CHCR_KEYCTX_NO_KEY, 1,
					 0, context_size);
	}
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->reqlen = 0;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);

	return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}

static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->base_hash = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->base_hash))
		return PTR_ERR(hmacctx->base_hash);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

	if (hmacctx->base_hash) {
		chcr_free_shash(hmacctx->base_hash);
		hmacctx->base_hash = NULL;
	}
}

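/*
 * chcr_aead_common_init()/chcr_aead_common_exit() bracket every AEAD
 * work request: init validates the key and tag lengths and DMA-maps
 * the request; exit undoes the mapping on completion or error.
 */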
inline void chcr_aead_common_exit(struct aead_request *req)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));

	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
}

static int chcr_aead_common_init(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;

	/* validate key size */
	if (aeadctx->enckey_len == 0)
		goto err;
	if (reqctx->op && req->cryptlen < authsize)
		goto err;
	if (reqctx->b0_len)
		reqctx->scratch_pad = reqctx->iv + IV;
	else
		reqctx->scratch_pad = NULL;

	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
				  reqctx->op);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	return 0;
err:
	return error;
}

static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
				   int aadmax, int wrlen,
				   unsigned short op_type)
{
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));

	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
	    dst_nents > MAX_DSGL_ENT ||
	    (req->assoclen > aadmax) ||
	    (wrlen > SGE_MAX_WR_LEN))
		return 1;
	return 0;
}

static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
	return op_type ? crypto_aead_decrypt(subreq) :
		crypto_aead_encrypt(subreq);
}

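/*
 * create_authenc_wr - build a combined cipher+hash (authenc) work
 * request. Requests that exceed the WR, AAD, or DSGL limits are
 * redirected to the software fallback cipher instead.
 */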
static struct sk_buff *create_authenc_wr(struct aead_request *req,
					 unsigned short qid,
					 int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
	unsigned int kctx_len = 0, dnents, snents;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	int null = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	if (req->cryptlen == 0)
		return NULL;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		null = 1;
	}
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
	dnents += MIN_AUTH_SG; // For IV
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
		- sizeof(chcr_req->key_ctx);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
			: (sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;

	/*
	 * The input order is AAD, IV, then payload, where the IV is
	 * counted as part of the authenticated data. All other fields
	 * are filled according to the hardware spec.
	 */
	chcr_req->sec_cpl.op_ivinsrtofst =
			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					null ? 0 : 1 + IV,
					null ? 0 : IV + req->assoclen,
					req->assoclen + IV + 1,
					(temp & 0x1F0) >> 4);
	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
					temp & 0xF,
					null ? 0 : req->assoclen + IV + 1,
					temp, temp);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
	else
		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
					temp,
					actx->auth_mode, aeadctx->hmac_ctrl,
					IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					 0, 0, dst_size);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	if (reqctx->op == CHCR_ENCRYPT_OP ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
		memcpy(chcr_req->key_ctx.key, aeadctx->key,
		       aeadctx->enckey_len);
	else
		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
		       aeadctx->enckey_len);

	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	ulptx = (struct ulptx_sgl *)(ivptr + IV);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);
		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
	} else {
		memcpy(ivptr, req->iv, IV);
	}
	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);
	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;

	return skb;
err:
	chcr_aead_common_exit(req);

	return ERR_PTR(error);
}

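/*
 * chcr_aead_dma_map - DMA-map the IV (plus the CCM B0 block when
 * present) and the source/destination scatterlists. In-place requests
 * map a single bidirectional list; out-of-place requests map src and
 * dst separately.
 */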
int chcr_aead_dma_map(struct device *dev,
		      struct aead_request *req,
		      unsigned short op_type)
{
	int error;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int src_len, dst_len;

	/* Calculate and handle the src and dst sg lengths separately
	 * for in-place and out-of-place operations.
	 */
	if (req->src == req->dst) {
		src_len = req->assoclen + req->cryptlen + (op_type ?
							0 : authsize);
		dst_len = src_len;
	} else {
		src_len = req->assoclen + req->cryptlen;
		dst_len = req->assoclen + req->cryptlen + (op_type ?
							-authsize : authsize);
	}

	if (!req->cryptlen || !src_len || !dst_len)
		return 0;
	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, reqctx->iv_dma))
		return -ENOMEM;
	if (reqctx->b0_len)
		reqctx->b0_dma = reqctx->iv_dma + IV;
	else
		reqctx->b0_dma = 0;
	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src,
				   sg_nents_for_len(req->src, src_len),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src,
				   sg_nents_for_len(req->src, src_len),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst,
				   sg_nents_for_len(req->dst, dst_len),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src,
				     sg_nents_for_len(req->src, src_len),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
	return -ENOMEM;
}

void chcr_aead_dma_unmap(struct device *dev,
			 struct aead_request *req,
			 unsigned short op_type)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int src_len, dst_len;

	/* Calculate and handle the src and dst sg lengths separately
	 * for in-place and out-of-place operations.
	 */
	if (req->src == req->dst) {
		src_len = req->assoclen + req->cryptlen + (op_type ?
							0 : authsize);
		dst_len = src_len;
	} else {
		src_len = req->assoclen + req->cryptlen;
		dst_len = req->assoclen + req->cryptlen + (op_type ?
							-authsize : authsize);
	}

	if (!req->cryptlen || !src_len || !dst_len)
		return;

	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
			 DMA_BIDIRECTIONAL);
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src,
			     sg_nents_for_len(req->src, src_len),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src,
			     sg_nents_for_len(req->src, src_len),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst,
			     sg_nents_for_len(req->dst, dst_len),
			     DMA_FROM_DEVICE);
	}
}

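/*
 * The chcr_add_*_src_ent()/chcr_add_*_dst_ent() helpers below emit the
 * ULPTX source SGL (or inline immediate data) and the RX_PHYS_DSGL
 * destination list for a work request, resuming from the walk state
 * saved in the request context for multi-WR transfers.
 */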
void chcr_add_aead_src_ent(struct aead_request *req,
			   struct ulptx_sgl *ulptx)
{
	struct ulptx_walk ulp_walk;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	if (reqctx->imm) {
		u8 *buf = (u8 *)ulptx;

		if (reqctx->b0_len) {
			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
			buf += reqctx->b0_len;
		}
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, req->cryptlen + req->assoclen, 0);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (reqctx->b0_len)
			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
					    reqctx->b0_dma);
		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
				  req->assoclen, 0);
		ulptx_walk_end(&ulp_walk);
	}
}

void chcr_add_aead_dst_ent(struct aead_request *req,
			   struct cpl_rx_phys_dsgl *phys_cpl,
			   unsigned short qid)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct dsgl_walk dsgl_walk;
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct chcr_context *ctx = a_ctx(tfm);
	u32 temp;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
	temp = req->assoclen + req->cryptlen +
		(reqctx->op ? -authsize : authsize);
	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
}

void chcr_add_cipher_src_ent(struct skcipher_request *req,
			     void *ulptx,
			     struct cipher_wr_param *wrparam)
{
	struct ulptx_walk ulp_walk;
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	u8 *buf = ulptx;

	memcpy(buf, reqctx->iv, IV);
	buf += IV;
	if (reqctx->imm) {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, wrparam->bytes, reqctx->processed);
	} else {
		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
				  reqctx->src_ofst);
		reqctx->srcsg = ulp_walk.last_sg;
		reqctx->src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}

void chcr_add_cipher_dst_ent(struct skcipher_request *req,
			     struct cpl_rx_phys_dsgl *phys_cpl,
			     struct cipher_wr_param *wrparam,
			     unsigned short qid)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct dsgl_walk dsgl_walk;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
			 reqctx->dst_ofst);
	reqctx->dstsg = dsgl_walk.last_sg;
	reqctx->dst_ofst = dsgl_walk.last_sg_len;
	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
}

void chcr_add_hash_src_ent(struct ahash_request *req,
			   struct ulptx_sgl *ulptx,
			   struct hash_wr_param *param)
{
	struct ulptx_walk ulp_walk;
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);

	if (reqctx->hctx_wr.imm) {
		u8 *buf = (u8 *)ulptx;

		if (param->bfr_len) {
			memcpy(buf, reqctx->reqbfr, param->bfr_len);
			buf += param->bfr_len;
		}

		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
				   sg_nents(reqctx->hctx_wr.srcsg), buf,
				   param->sg_len, 0);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (param->bfr_len)
			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
					    reqctx->hctx_wr.dma_addr);
		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
				  param->sg_len, reqctx->hctx_wr.src_ofst);
		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}

int chcr_hash_dma_map(struct device *dev,
		      struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	int error = 0;

	if (!req->nbytes)
		return 0;
	error = dma_map_sg(dev, req->src, sg_nents(req->src),
			   DMA_TO_DEVICE);
	if (!error)
		return -ENOMEM;
	req_ctx->hctx_wr.is_sg_map = 1;
	return 0;
}

void chcr_hash_dma_unmap(struct device *dev,
			 struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return;

	dma_unmap_sg(dev, req->src, sg_nents(req->src),
		     DMA_TO_DEVICE);
	req_ctx->hctx_wr.is_sg_map = 0;

}

int chcr_cipher_dma_map(struct device *dev,
			struct skcipher_request *req)
{
	int error;

	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	return -ENOMEM;
}

void chcr_cipher_dma_unmap(struct device *dev,
			   struct skcipher_request *req)
{
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_FROM_DEVICE);
	}
}

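/*
 * CCM helpers: set_msg_len() encodes the message length into the
 * trailing L bytes of the B0 block, and generate_b0() assembles B0
 * itself (flags, nonce and length) as defined in RFC 3610.
 */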
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

static int generate_b0(struct aead_request *req, u8 *ivptr,
		       unsigned short op_type)
{
	unsigned int l, lp, m;
	int rc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	u8 *b0 = reqctx->scratch_pad;

	m = crypto_aead_authsize(aead);

	memcpy(b0, ivptr, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (req->assoclen)
		*b0 |= 64;
	rc = set_msg_len(b0 + 16 - l,
			 (op_type == CHCR_DECRYPT_OP) ?
			 req->cryptlen - m : req->cryptlen, l);

	return rc;
}

static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (iv[0] < 1 || iv[0] > 7)
		return -EINVAL;

	return 0;
}

static int ccm_format_packet(struct aead_request *req,
			     u8 *ivptr,
			     unsigned int sub_type,
			     unsigned short op_type,
			     unsigned int assoclen)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	int rc = 0;

	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		ivptr[0] = 3;
		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
		memcpy(ivptr + 4, req->iv, 8);
		memset(ivptr + 12, 0, 4);
	} else {
		memcpy(ivptr, req->iv, 16);
	}
	if (assoclen)
		put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);

	rc = generate_b0(req, ivptr, op_type);
	/* zero the ctr value */
	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
	return rc;
}

static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
				  unsigned int dst_size,
				  struct aead_request *req,
				  unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
	unsigned int ccm_xtra;
	unsigned int tag_offset = 0, auth_offset = 0;
	unsigned int assoclen;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	ccm_xtra = CCM_B0_SIZE +
		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);

	auth_offset = req->cryptlen ?
		(req->assoclen + IV + 1 + ccm_xtra) : 0;
	if (op_type == CHCR_DECRYPT_OP) {
		if (crypto_aead_authsize(tfm) != req->cryptlen)
			tag_offset = crypto_aead_authsize(tfm);
		else
			auth_offset = 0;
	}

	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
	sec_cpl->pldlen =
		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will always be a B0 block, so AAD always starts at 1 */
	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					1 + IV,	IV + assoclen + ccm_xtra,
					req->assoclen + IV + 1 + ccm_xtra, 0);

	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
					auth_offset, tag_offset,
					(op_type == CHCR_ENCRYPT_OP) ? 0 :
					crypto_aead_authsize(tfm));
	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
					cipher_mode, mac_mode,
					aeadctx->hmac_ctrl, IV >> 1);

	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
					0, dst_size);
}

static int aead_ccm_validate_input(unsigned short op_type,
				   struct aead_request *req,
				   struct chcr_aead_ctx *aeadctx,
				   unsigned int sub_type)
{
	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		if (crypto_ccm_check_iv(req->iv)) {
			pr_err("CCM: IV check fails\n");
			return -EINVAL;
		}
	} else {
		if (req->assoclen != 16 && req->assoclen != 20) {
			pr_err("RFC4309: Invalid AAD length %d\n",
			       req->assoclen);
			return -EINVAL;
		}
	}
	return 0;
}

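/*
 * create_aead_ccm_wr - build a CCM (and RFC 4309) work request. The
 * B0 block and the optional AAD-length field are staged in the
 * scratch pad and prepended to the payload before it is handed to
 * the hardware.
 */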
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
					  unsigned short qid,
					  int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
	unsigned int sub_type, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	sub_type = get_aead_subtype(tfm);
	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen -= 8;
	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
	if (error)
		goto err;
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
			+ (reqctx->op ? -authsize : authsize),
			CHCR_DST_SG_SIZE, 0);
	dnents += MIN_CCM_SG; // For IV and B0
	dst_size = get_space_for_phys_dsgl(dnents);
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	snents += MIN_CCM_SG; //For B0
	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
				     reqctx->b0_len, 16) :
		(sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
				    reqctx->b0_len, transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);

	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       aeadctx->key, aeadctx->enckey_len);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	ulptx = (struct ulptx_sgl *)(ivptr + IV);
	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
	if (error)
		goto dstmap_fail;
	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);

	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
		reqctx->b0_len) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;

	return skb;
dstmap_fail:
	kfree_skb(skb);
err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}

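/*
 * create_gcm_wr - build a GCM (and RFC 4106) work request. The 16-byte
 * hardware IV is laid out as SALT | IV | 0x00000001, i.e. the initial
 * counter block.
 */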
3123static struct sk_buff *create_gcm_wr(struct aead_request *req,
3124 unsigned short qid,
4262c98a 3125 int size)
2debd332
HJ
3126{
3127 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
567be3a5
AS
3128 struct chcr_context *ctx = a_ctx(tfm);
3129 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2debd332
HJ
3130 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3131 struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len, dnents = 0, snents;
	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	u8 *ivptr;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);
	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
				(reqctx->op ? -authsize : authsize),
				CHCR_DST_SG_SIZE, 0);
	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
			       CHCR_SRC_SG_SIZE, 0);
	dnents += MIN_GCM_SG; // For IV
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
			(sgl_len(snents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	// Offset of tag from end
	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
					rx_channel_id, 2, 1);
	chcr_req->sec_cpl.pldlen =
		htonl(req->assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 + IV : 0,
					assoclen ? IV + assoclen : 0,
					req->assoclen + IV + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
						temp, temp);
	chcr_req->sec_cpl.seqno_numivs =
			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
					CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_GCM,
					CHCR_SCMD_AUTH_MODE_GHASH,
					aeadctx->hmac_ctrl, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 0, dst_size);
	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	/* prepare a 16 byte iv */
	/* S A L T | IV | 0x00000001 */
	if (get_aead_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
		memcpy(ivptr, aeadctx->salt, 4);
		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
	} else {
		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
	}
	put_unaligned_be32(0x01, &ivptr[12]);
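	/*
	 * Illustrative byte layout of the counter block assembled above
	 * (follows from the memcpy()s and the final put_unaligned_be32()):
	 *
	 *   rfc4106(gcm(aes)): | salt[0..3] | req->iv[0..7] | 00 00 00 01 |
	 *   gcm(aes):          | req->iv[0..11]             | 00 00 00 01 |
	 *
	 * i.e. the standard GCM J0 block for a 96-bit nonce, with the
	 * 32-bit counter preloaded to 1.
	 */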
	ulptx = (struct ulptx_sgl *)(ivptr + 16);

	chcr_add_aead_dst_ent(req, phys_cpl, qid);
	chcr_add_aead_src_ent(req, ulptx);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, reqctx->verify);
	reqctx->skb = skb;
	return skb;

err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}

static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_alg *alg = crypto_aead_alg(tfm);

	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK |
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
				 sizeof(struct aead_request) +
				 crypto_aead_reqsize(aeadctx->sw_cipher)));
	return chcr_device_init(a_ctx(tfm));
}
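/*
 * Note on the reqsize set above: every AEAD request allocated against this
 * tfm must be able to hold either the driver's own request context or a
 * complete nested request for the software fallback cipher, whichever is
 * larger, so a fallback can reuse the caller's allocation. The max() of
 * the two sizes captures exactly that.
 */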

static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	crypto_free_aead(aeadctx->sw_cipher);
}

static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					 unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/* The IPsec authsize for SHA-1 is 12, not 10, i.e. maxauthsize / 2
	 * does not hold for SHA-1, so the authsize == 12 check must come
	 * before the authsize == (maxauth >> 1) check.
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
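/*
 * Worked example for the ordering caveat above: with hmac(sha1),
 * maxauthsize is 20, so maxauth >> 1 evaluates to 10 (ICV_10), while the
 * IPsec truncation for SHA-1 is 12 bytes. Checking ICV_12 first maps
 * authsize 12 to the hardware's dedicated 96-bit truncation mode
 * (CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT) rather than any half-digest handling.
 */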

static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_13:
	case ICV_15:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
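/*
 * Note the ICV_13/ICV_15 cases above: the hardware has no truncation mode
 * for 13- or 15-byte GCM tags, so the driver asks it for the full 16-byte
 * tag (NO_TRUNC) and marks the context VERIFY_SW, deferring tag comparison
 * on decrypt to software (see chcr_aead_decrypt(), which reserves
 * maxauthsize bytes whenever mayverify == VERIFY_SW).
 */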

static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;

	return 0;
}
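/*
 * Size arithmetic above, worked for AES-128: roundup(16, 16) * 2 = 32
 * bytes of key material on top of the _key_ctx header. The factor of two
 * presumably reflects the key context carrying both a cipher-key and a
 * MAC-key slot (CCM drives its CBC-MAC with the same AES key), and
 * key_ctx_size >> 4 encodes the total in 16-byte units for the header.
 */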

static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}

static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	if (keylen < 3) {
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (error)
		return error;
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}
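/*
 * RFC 4309 key blobs are laid out as AES key || 3-byte salt, so e.g. a
 * 19-byte blob splits into a 16-byte AES-128 key plus the salt that is
 * later prepended to the per-request IV. The memcpy() above peels the
 * salt off the tail before handing the bare AES key to the common CCM
 * setkey path.
 */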

static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;
	struct crypto_aes_ctx aes;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in the key context.
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
	memzero_explicit(&aes, sizeof(aes));

out:
	return ret;
}
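/*
 * A minimal illustrative sketch (kept out of the build with #if 0) of the
 * hash-subkey derivation performed at the end of chcr_gcm_setkey() above:
 * H = CIPH_K(0^128), computed with the generic AES library. The function
 * name is hypothetical; it only restates the aes_expandkey()/aes_encrypt()
 * sequence in isolation.
 */
#if 0
static int example_ghash_subkey(const u8 *key, unsigned int keylen,
				u8 h[AEAD_H_SIZE])
{
	struct crypto_aes_ctx aes;
	int ret;

	ret = aes_expandkey(&aes, key, keylen);	/* schedule round keys */
	if (ret)
		return ret;
	memset(h, 0, AEAD_H_SIZE);		/* the all-zero input block */
	aes_encrypt(&aes, h, h);		/* H = AES_K(0^128) */
	memzero_explicit(&aes, sizeof(aes));	/* scrub the key schedule */
	return 0;
}
#endif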

static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* holds both the authentication and the cipher key */
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto out;

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. The authentication key is used only
	 * to generate h(ipad) and h(opad), so it is not stored; keys longer
	 * than the hash block size are first digested down to the digest
	 * size.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {

		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("chcr : Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		memzero_explicit(&keys, sizeof(keys));
		return -EINVAL;
	}
	{
		SHASH_DESC_ON_STACK(shash, base_hash);

		shash->tfm = base_hash;
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
				pr_err("chcr : Base driver cannot be loaded\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else
			memcpy(o_ptr, keys.authkey, keys.authkeylen);

		/* Compute the ipad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* convert the ipad and opad digest to network order */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			roundup(keys.enckeylen, 16) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
						0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);

		memzero_explicit(&keys, sizeof(keys));
		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}
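/*
 * The h(ipad)/h(opad) precomputation above follows the standard HMAC
 * identity HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)): the driver
 * hashes one block of K ^ 0x36..36 (IPAD_DATA) and one of K ^ 0x5c..5c
 * (OPAD_DATA) once at setkey time and stores only the partial hash
 * states, letting the hardware resume from those states for every
 * request instead of re-deriving them per packet.
 */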

static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* holds both the authentication and the cipher key */
	struct crypto_authenc_keys keys;
	int err;
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto out;

	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;
	struct chcr_dev *cdev;

	cdev = a_ctx(tfm)->dev;
	if (!cdev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}

	if (chcr_inc_wrcount(cdev)) {
		/* The detached state for CHCR means lldi or padap has been
		 * freed; we cannot increment the fallback counter here.
		 */
		return chcr_aead_fallback(req, reqctx->op);
	}

	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					reqctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
			chcr_dec_wrcount(cdev);
			return -ENOSPC;
	}

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
		pr_err("RFC4106: Invalid value of assoclen %d\n",
		       req->assoclen);
		chcr_dec_wrcount(cdev);	/* balance chcr_inc_wrcount() above */
		return -EINVAL;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);

	if (IS_ERR_OR_NULL(skb)) {
		chcr_dec_wrcount(cdev);
		return PTR_ERR_OR_ZERO(skb);
	}

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
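/*
 * Submission semantics above, for reference: -ENOSPC is returned when the
 * crypto queue is full and the caller did not set
 * CRYPTO_TFM_REQ_MAY_BACKLOG, while a successfully queued work request
 * always completes asynchronously, hence the -EINPROGRESS return that the
 * crypto API expects from async implementations.
 */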

static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct chcr_context *ctx = a_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}

static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}
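/*
 * Per-request queue selection in the two entry points above simply hashes
 * the submitting CPU across the available tx/rx queues (cpu % ntxq,
 * cpu % nrxq), which spreads load without any per-tfm queue state; the
 * extra "size" passed on the decrypt path is the room reserved for the
 * full tag when verification falls back to software.
 */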

static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "cbc(aes)",
			.base.cra_driver_name = "cbc-aes-chcr",
			.base.cra_blocksize = AES_BLOCK_SIZE,

			.init = chcr_init_tfm,
			.exit = chcr_exit_tfm,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aes_cbc_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "xts(aes)",
			.base.cra_driver_name = "xts-aes-chcr",
			.base.cra_blocksize = AES_BLOCK_SIZE,

			.init = chcr_init_tfm,
			.exit = chcr_exit_tfm,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aes_xts_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "ctr(aes)",
			.base.cra_driver_name = "ctr-aes-chcr",
			.base.cra_blocksize = 1,

			.init = chcr_init_tfm,
			.exit = chcr_exit_tfm,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aes_ctr_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER |
			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "rfc3686(ctr(aes))",
			.base.cra_driver_name = "rfc3686-ctr-aes-chcr",
			.base.cra_blocksize = 1,

			.init = chcr_rfc3686_init,
			.exit = chcr_exit_tfm,
			.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.setkey = chcr_aes_rfc3686_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	/* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* Add AEAD Algorithms */
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_AES_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_gcm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_RFC4106_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_ccm_setkey,
			.setauthsize = chcr_ccm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};
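/*
 * Conventions in the template table above: each .type combines the crypto
 * API type with a driver-private subtype so the setkey and work-request
 * builders can branch on it later, and the rfc4106/rfc4309 wrappers
 * advertise CHCR_AEAD_PRIORITY + 1, a slight bump over the plain
 * gcm(aes)/ccm(aes) entries.
 */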

/*
 * chcr_unregister_alg - Deregister crypto algorithms with
 * kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.skcipher.base.cra_refcnt)
			    == 1) {
				crypto_unregister_skcipher(
						&driver_algs[i].alg.skcipher);
				driver_algs[i].is_registered = 0;
			}
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
				driver_algs[i].is_registered = 0;
			}
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.hash.halg.base.cra_refcnt)
			    == 1) {
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
				driver_algs[i].is_registered = 0;
			}
			break;
		}
	}
	return 0;
}
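/*
 * The refcount_read() == 1 guard above skips deregistration while anything
 * else still holds a reference to the algorithm, so a busy transform is
 * never yanked out from under its users; such entries simply remain
 * registered.
 */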

#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)

/*
 * chcr_register_alg - Register crypto algorithms with kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			driver_algs[i].alg.skcipher.base.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
			driver_algs[i].alg.skcipher.base.cra_flags =
				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.skcipher.base.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;

			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_ALLOCATES_MEMORY;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}
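/*
 * All three registration paths above set CRYPTO_ALG_ALLOCATES_MEMORY,
 * which tells the crypto API that request processing may allocate memory
 * (the skb for the work request, DMA mappings, and so on), so callers
 * that cannot tolerate allocation during a request can steer around this
 * driver.
 */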

/*
 * start_crypto - Register the crypto algorithms.
 * This should be called once, when the first device comes up. After this
 * the kernel will start calling driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 * stop_crypto - Deregister all the crypto algorithms with the kernel.
 * This should be called once, when the last device goes down. After this
 * the kernel will not call the driver API for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}