1/*
2 * This file is part of the Chelsio T6 Crypto driver for Linux.
3 *
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 * Written and Maintained by:
35 * Manoj Malviya (manojmalviya@chelsio.com)
36 * Atul Gupta (atul.gupta@chelsio.com)
37 * Jitendra Lulla (jlulla@chelsio.com)
38 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39 * Harsh Jain (harsh@chelsio.com)
40 */
41
42#define pr_fmt(fmt) "chcr:" fmt
43
44#include <linux/kernel.h>
45#include <linux/module.h>
46#include <linux/crypto.h>
47#include <linux/cryptohash.h>
48#include <linux/skbuff.h>
49#include <linux/rtnetlink.h>
50#include <linux/highmem.h>
51#include <linux/scatterlist.h>
52
53#include <crypto/aes.h>
54#include <crypto/algapi.h>
55#include <crypto/hash.h>
56#include <crypto/sha.h>
57#include <crypto/authenc.h>
58#include <crypto/ctr.h>
59#include <crypto/gf128mul.h>
60#include <crypto/internal/aead.h>
61#include <crypto/null.h>
62#include <crypto/internal/skcipher.h>
63#include <crypto/aead.h>
64#include <crypto/scatterwalk.h>
65#include <crypto/internal/hash.h>
66
67#include "t4fw_api.h"
68#include "t4_msg.h"
69#include "chcr_core.h"
70#include "chcr_algo.h"
71#include "chcr_crypto.h"
72
73static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
74{
75 return ctx->crypto_ctx->aeadctx;
76}
77
78static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
79{
80 return ctx->crypto_ctx->ablkctx;
81}
82
83static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
84{
85 return ctx->crypto_ctx->hmacctx;
86}
87
88static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
89{
90 return gctx->ctx->gcm;
91}
92
93static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
94{
95 return gctx->ctx->authenc;
96}
97
98static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
99{
100 return ctx->dev->u_ctx;
101}
102
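/* True when the packet is small enough to be carried as immediate data in the
 * work request rather than via a gather list.
 */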
103static inline int is_ofld_imm(const struct sk_buff *skb)
104{
105 return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
106}
107
108/*
109 * sgl_len - calculates the size of an SGL of the given capacity
110 * @n: the number of SGL entries
111 * Calculates the number of flits needed for a scatter/gather list that
112 * can hold the given number of entries.
113 */
114static inline unsigned int sgl_len(unsigned int n)
115{
116 n--;
117 return (3 * n) / 2 + (n & 1) + 2;
118}
119
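/* Verify the AEAD tag in software: compare the tag computed by the hardware
 * (returned in the CPL_FW6_PLD) with the expected tag and set -EBADMSG on
 * mismatch.
 */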
120static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
121{
122 u8 temp[SHA512_DIGEST_SIZE];
123 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
124 int authsize = crypto_aead_authsize(tfm);
125 struct cpl_fw6_pld *fw6_pld;
126 int cmp = 0;
127
128 fw6_pld = (struct cpl_fw6_pld *)input;
129 if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
130 (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
131 cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
132 } else {
133
134 sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
135 authsize, req->assoclen +
136 req->cryptlen - authsize);
137 cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
138 }
139 if (cmp)
140 *err = -EBADMSG;
141 else
142 *err = 0;
143}
144
145/*
146 * chcr_handle_resp - Unmap the DMA buffers associated with the request
147 * @req: crypto request
148 */
149int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
150 int err)
151{
152 struct crypto_tfm *tfm = req->tfm;
153 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
154 struct uld_ctx *u_ctx = ULD_CTX(ctx);
155 struct chcr_req_ctx ctx_req;
156 unsigned int digestsize, updated_digestsize;
157 struct adapter *adap = padap(ctx->dev);
158
159 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
160 case CRYPTO_ALG_TYPE_AEAD:
161 ctx_req.req.aead_req = aead_request_cast(req);
162 ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
163 dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
164 ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
165 if (ctx_req.ctx.reqctx->skb) {
166 kfree_skb(ctx_req.ctx.reqctx->skb);
167 ctx_req.ctx.reqctx->skb = NULL;
168 }
169 free_new_sg(ctx_req.ctx.reqctx->newdstsg);
170 ctx_req.ctx.reqctx->newdstsg = NULL;
171 if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
172 chcr_verify_tag(ctx_req.req.aead_req, input,
173 &err);
174 ctx_req.ctx.reqctx->verify = VERIFY_HW;
175 }
176 ctx_req.req.aead_req->base.complete(req, err);
177 break;
178
179 case CRYPTO_ALG_TYPE_ABLKCIPHER:
180 err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
181 input, err);
182 break;
183
184 case CRYPTO_ALG_TYPE_AHASH:
185 ctx_req.req.ahash_req = ahash_request_cast(req);
186 ctx_req.ctx.ahash_ctx =
187 ahash_request_ctx(ctx_req.req.ahash_req);
188 digestsize =
189 crypto_ahash_digestsize(crypto_ahash_reqtfm(
190 ctx_req.req.ahash_req));
191 updated_digestsize = digestsize;
192 if (digestsize == SHA224_DIGEST_SIZE)
193 updated_digestsize = SHA256_DIGEST_SIZE;
194 else if (digestsize == SHA384_DIGEST_SIZE)
195 updated_digestsize = SHA512_DIGEST_SIZE;
196 if (ctx_req.ctx.ahash_ctx->skb) {
197 kfree_skb(ctx_req.ctx.ahash_ctx->skb);
198 ctx_req.ctx.ahash_ctx->skb = NULL;
199 }
200 if (ctx_req.ctx.ahash_ctx->result == 1) {
201 ctx_req.ctx.ahash_ctx->result = 0;
202 memcpy(ctx_req.req.ahash_req->result, input +
203 sizeof(struct cpl_fw6_pld),
204 digestsize);
205 } else {
206 memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
207 sizeof(struct cpl_fw6_pld),
208 updated_digestsize);
209 }
210 ctx_req.req.ahash_req->base.complete(req, err);
211 break;
212 }
213 atomic_inc(&adap->chcr_stats.complete);
214 return err;
215}
216
217/*
218 * calc_tx_flits_ofld - calculate # of flits for an offload packet
219 * @skb: the packet
220 * Returns the number of flits needed for the given offload packet.
221 * These packets are already fully constructed and no additional headers
222 * will be added.
223 */
224static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
225{
226 unsigned int flits, cnt;
227
228 if (is_ofld_imm(skb))
229 return DIV_ROUND_UP(skb->len, 8);
230
231 flits = skb_transport_offset(skb) / 8; /* headers */
232 cnt = skb_shinfo(skb)->nr_frags;
233 if (skb_tail_pointer(skb) != skb_transport_header(skb))
234 cnt++;
235 return flits + sgl_len(cnt);
236}
237
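/* Run the AES key schedule on the cipher key and write out the last round's
 * key words (converted to big endian), which the hardware uses as the
 * decryption (reverse-round) key.
 */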
238static inline void get_aes_decrypt_key(unsigned char *dec_key,
239 const unsigned char *key,
240 unsigned int keylength)
241{
242 u32 temp;
243 u32 w_ring[MAX_NK];
244 int i, j, k;
245 u8 nr, nk;
246
247 switch (keylength) {
248 case AES_KEYLENGTH_128BIT:
249 nk = KEYLENGTH_4BYTES;
250 nr = NUMBER_OF_ROUNDS_10;
251 break;
252 case AES_KEYLENGTH_192BIT:
253 nk = KEYLENGTH_6BYTES;
254 nr = NUMBER_OF_ROUNDS_12;
255 break;
256 case AES_KEYLENGTH_256BIT:
257 nk = KEYLENGTH_8BYTES;
258 nr = NUMBER_OF_ROUNDS_14;
259 break;
260 default:
261 return;
262 }
263 for (i = 0; i < nk; i++)
264 w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
265
266 i = 0;
267 temp = w_ring[nk - 1];
268 while (i + nk < (nr + 1) * 4) {
269 if (!(i % nk)) {
270 /* RotWord(temp) */
271 temp = (temp << 8) | (temp >> 24);
272 temp = aes_ks_subword(temp);
273 temp ^= round_constant[i / nk];
274 } else if (nk == 8 && (i % 4 == 0)) {
275 temp = aes_ks_subword(temp);
276 }
277 w_ring[i % nk] ^= temp;
278 temp = w_ring[i % nk];
279 i++;
280 }
281 i--;
282 for (k = 0, j = i % nk; k < nk; k++) {
283 *((u32 *)dec_key + k) = htonl(w_ring[j]);
284 j--;
285 if (j < 0)
286 j += nk;
287 }
288}
289
290static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
291{
292 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
293
294 switch (ds) {
295 case SHA1_DIGEST_SIZE:
296 base_hash = crypto_alloc_shash("sha1", 0, 0);
297 break;
298 case SHA224_DIGEST_SIZE:
299 base_hash = crypto_alloc_shash("sha224", 0, 0);
300 break;
301 case SHA256_DIGEST_SIZE:
302 base_hash = crypto_alloc_shash("sha256", 0, 0);
303 break;
304 case SHA384_DIGEST_SIZE:
305 base_hash = crypto_alloc_shash("sha384", 0, 0);
306 break;
307 case SHA512_DIGEST_SIZE:
308 base_hash = crypto_alloc_shash("sha512", 0, 0);
309 break;
310 }
311
312 return base_hash;
313}
314
315static int chcr_compute_partial_hash(struct shash_desc *desc,
316 char *iopad, char *result_hash,
317 int digest_size)
318{
319 struct sha1_state sha1_st;
320 struct sha256_state sha256_st;
321 struct sha512_state sha512_st;
322 int error;
323
324 if (digest_size == SHA1_DIGEST_SIZE) {
325 error = crypto_shash_init(desc) ?:
326 crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
327 crypto_shash_export(desc, (void *)&sha1_st);
328 memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
329 } else if (digest_size == SHA224_DIGEST_SIZE) {
330 error = crypto_shash_init(desc) ?:
331 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
332 crypto_shash_export(desc, (void *)&sha256_st);
333 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
334
335 } else if (digest_size == SHA256_DIGEST_SIZE) {
336 error = crypto_shash_init(desc) ?:
337 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
338 crypto_shash_export(desc, (void *)&sha256_st);
339 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
340
341 } else if (digest_size == SHA384_DIGEST_SIZE) {
342 error = crypto_shash_init(desc) ?:
343 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
344 crypto_shash_export(desc, (void *)&sha512_st);
345 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
346
347 } else if (digest_size == SHA512_DIGEST_SIZE) {
348 error = crypto_shash_init(desc) ?:
349 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
350 crypto_shash_export(desc, (void *)&sha512_st);
351 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
352 } else {
353 error = -EINVAL;
354 pr_err("Unknown digest size %d\n", digest_size);
355 }
356 return error;
357}
358
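/* Convert the partial hash words to big-endian order, using 64-bit words when
 * the (updated) digest size is that of SHA-512 and 32-bit words otherwise.
 */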
359static void chcr_change_order(char *buf, int ds)
360{
361 int i;
362
363 if (ds == SHA512_DIGEST_SIZE) {
364 for (i = 0; i < (ds / sizeof(u64)); i++)
365 *((__be64 *)buf + i) =
366 cpu_to_be64(*((u64 *)buf + i));
367 } else {
368 for (i = 0; i < (ds / sizeof(u32)); i++)
369 *((__be32 *)buf + i) =
370 cpu_to_be32(*((u32 *)buf + i));
371 }
372}
373
374static inline int is_hmac(struct crypto_tfm *tfm)
375{
376 struct crypto_alg *alg = tfm->__crt_alg;
377 struct chcr_alg_template *chcr_crypto_alg =
378 container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
379 alg.hash);
380 if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
381 return 1;
382 return 0;
383}
384
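/* Build the CPL_RX_PHYS_DSGL destination descriptor from the DMA-mapped
 * scatterlist: fill the header, then up to eight address/length pairs per
 * phys_sge_pairs entry until obsize bytes are covered.
 */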
385static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
386 struct scatterlist *sg,
387 struct phys_sge_parm *sg_param)
388{
389 struct phys_sge_pairs *to;
390 unsigned int len = 0, left_size = sg_param->obsize;
391 unsigned int nents = sg_param->nents, i, j = 0;
392
393 phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
394 | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
395 phys_cpl->pcirlxorder_to_noofsgentr =
396 htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
397 CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
398 CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
399 CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
400 CPL_RX_PHYS_DSGL_DCAID_V(0) |
401 CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
402 phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
403 phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
404 phys_cpl->rss_hdr_int.hash_val = 0;
405 to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
406 sizeof(struct cpl_rx_phys_dsgl));
407 for (i = 0; nents && left_size; to++) {
408 for (j = 0; j < 8 && nents && left_size; j++, nents--) {
409 len = min(left_size, sg_dma_len(sg));
410 to->len[j] = htons(len);
411 to->addr[j] = cpu_to_be64(sg_dma_address(sg));
412 left_size -= len;
413 sg = sg_next(sg);
414 }
415 }
416}
417
418static inline int map_writesg_phys_cpl(struct device *dev,
419 struct cpl_rx_phys_dsgl *phys_cpl,
420 struct scatterlist *sg,
421 struct phys_sge_parm *sg_param)
422{
423 if (!sg || !sg_param->nents)
424 return -EINVAL;
425
426 sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
427 if (sg_param->nents == 0) {
428 pr_err("CHCR : DMA mapping failed\n");
429 return -EINVAL;
430 }
431 write_phys_cpl(phys_cpl, sg, sg_param);
432 return 0;
433}
434
435static inline int get_aead_subtype(struct crypto_aead *aead)
436{
437 struct aead_alg *alg = crypto_aead_alg(aead);
438 struct chcr_alg_template *chcr_crypto_alg =
439 container_of(alg, struct chcr_alg_template, alg.aead);
440 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
441}
442
443static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
444{
445 struct crypto_alg *alg = tfm->__crt_alg;
446 struct chcr_alg_template *chcr_crypto_alg =
447 container_of(alg, struct chcr_alg_template, alg.crypto);
448
449 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
450}
451
452static inline void write_buffer_to_skb(struct sk_buff *skb,
453 unsigned int *frags,
454 char *bfr,
455 u8 bfr_len)
456{
457 skb->len += bfr_len;
458 skb->data_len += bfr_len;
459 skb->truesize += bfr_len;
460 get_page(virt_to_page(bfr));
461 skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
462 offset_in_page(bfr), bfr_len);
463 (*frags)++;
464}
465
466
467static inline void
468write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
469 struct scatterlist *sg, unsigned int count)
470{
471 struct page *spage;
472 unsigned int page_len;
473
474 skb->len += count;
475 skb->data_len += count;
476 skb->truesize += count;
477
478 while (count > 0) {
479 if (!sg || (!(sg->length)))
480 break;
481 spage = sg_page(sg);
482 get_page(spage);
483 page_len = min(sg->length, count);
484 skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
485 (*frags)++;
486 count -= page_len;
487 sg = sg_next(sg);
488 }
489}
490
491static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
492{
493 struct adapter *adap = netdev2adap(dev);
494 struct sge_uld_txq_info *txq_info =
495 adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
496 struct sge_uld_txq *txq;
497 int ret = 0;
498
499 local_bh_disable();
500 txq = &txq_info->uldtxq[idx];
501 spin_lock(&txq->sendq.lock);
502 if (txq->full)
503 ret = -1;
504 spin_unlock(&txq->sendq.lock);
505 local_bh_enable();
506 return ret;
507}
508
509static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
510 struct _key_ctx *key_ctx)
511{
512 if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
513 memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
514 } else {
515 memcpy(key_ctx->key,
516 ablkctx->key + (ablkctx->enckey_len >> 1),
517 ablkctx->enckey_len >> 1);
518 memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
519 ablkctx->rrkey, ablkctx->enckey_len >> 1);
520 }
521 return 0;
522}
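/* Walk the source and destination scatterlists and work out how many bytes
 * fit in one work request given the available space; the number of source and
 * destination entries consumed is returned through *sent and *dent.
 */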
523static int chcr_sg_ent_in_wr(struct scatterlist *src,
524 struct scatterlist *dst,
525 unsigned int minsg,
526 unsigned int space,
527 short int *sent,
528 short int *dent)
529{
530 int srclen = 0, dstlen = 0;
531 int srcsg = minsg, dstsg = 0;
532
533 *sent = 0;
534 *dent = 0;
535 while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) &&
536 space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
537 srclen += src->length;
538 srcsg++;
539 while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
540 space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
541 if (srclen <= dstlen)
542 break;
543 dstlen += dst->length;
544 dst = sg_next(dst);
545 dstsg++;
546 }
547 src = sg_next(src);
548 }
549 *sent = srcsg - minsg;
550 *dent = dstsg;
551 return min(srclen, dstlen);
552}
553
554static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
555 u32 flags,
556 struct scatterlist *src,
557 struct scatterlist *dst,
558 unsigned int nbytes,
559 u8 *iv,
560 unsigned short op_type)
561{
562 int err;
563
564 SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
565 skcipher_request_set_tfm(subreq, cipher);
566 skcipher_request_set_callback(subreq, flags, NULL, NULL);
567 skcipher_request_set_crypt(subreq, src, dst,
568 nbytes, iv);
569
570 err = op_type ? crypto_skcipher_decrypt(subreq) :
571 crypto_skcipher_encrypt(subreq);
572 skcipher_request_zero(subreq);
573
574 return err;
575
576}
577static inline void create_wreq(struct chcr_context *ctx,
578 struct chcr_wr *chcr_req,
579 void *req, struct sk_buff *skb,
580 int kctx_len, int hash_sz,
581 int is_iv,
582 unsigned int sc_len,
583 unsigned int lcb)
584{
585 struct uld_ctx *u_ctx = ULD_CTX(ctx);
586 int iv_loc = IV_DSGL;
587 int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
588 unsigned int immdatalen = 0, nr_frags = 0;
589
590 if (is_ofld_imm(skb)) {
591 immdatalen = skb->data_len;
592 iv_loc = IV_IMMEDIATE;
593 } else {
594 nr_frags = skb_shinfo(skb)->nr_frags;
595 }
596
597 chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
598 ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
599 chcr_req->wreq.pld_size_hash_size =
600 htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
601 FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
602 chcr_req->wreq.len16_pkd =
603 htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
604 (calc_tx_flits_ofld(skb) * 8), 16)));
605 chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
606 chcr_req->wreq.rx_chid_to_rx_q_id =
607 FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
608 is_iv ? iv_loc : IV_NOP, !!lcb,
609 ctx->tx_qidx);
610
611 chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
612 qid);
613 chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
614 16) - ((sizeof(chcr_req->wreq)) >> 4)));
615
616 chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
617 chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
618 sizeof(chcr_req->key_ctx) +
619 kctx_len + sc_len + immdatalen);
620}
621
622/**
623 * create_cipher_wr - form the WR for cipher operations
624 * @req: cipher req.
625 * @ctx: crypto driver context of the request.
626 * @qid: ingress qid where response of this WR should be received.
627 * @op_type: encryption or decryption
628 */
629static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
630{
631 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
632 struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
633 struct uld_ctx *u_ctx = ULD_CTX(ctx);
634 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
635 struct sk_buff *skb = NULL;
636 struct chcr_wr *chcr_req;
637 struct cpl_rx_phys_dsgl *phys_cpl;
638 struct chcr_blkcipher_req_ctx *reqctx =
639 ablkcipher_request_ctx(wrparam->req);
640 struct phys_sge_parm sg_param;
641 unsigned int frags = 0, transhdr_len, phys_dsgl;
642 int error;
643 unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
644 gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
645 GFP_KERNEL : GFP_ATOMIC;
646 struct adapter *adap = padap(ctx->dev);
647
648 phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
649
650 kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
651 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
652 skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
653 if (!skb) {
654 error = -ENOMEM;
655 goto err;
656 }
657 skb_reserve(skb, sizeof(struct sge_opaque_hdr));
658 chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
659 memset(chcr_req, 0, transhdr_len);
660 chcr_req->sec_cpl.op_ivinsrtofst =
661 FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
662
663 chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes);
664 chcr_req->sec_cpl.aadstart_cipherstop_hi =
665 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
666
667 chcr_req->sec_cpl.cipherstop_lo_authinsert =
668 FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
669 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
670 ablkctx->ciph_mode,
671 0, 0, ivsize >> 1);
672 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
673 0, 1, phys_dsgl);
674
675 chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
676 if ((reqctx->op == CHCR_DECRYPT_OP) &&
677 (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
678 CRYPTO_ALG_SUB_TYPE_CTR)) &&
679 (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
680 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
681 generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
682 } else {
683 if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
684 (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
685 memcpy(chcr_req->key_ctx.key, ablkctx->key,
686 ablkctx->enckey_len);
687 } else {
688 memcpy(chcr_req->key_ctx.key, ablkctx->key +
689 (ablkctx->enckey_len >> 1),
690 ablkctx->enckey_len >> 1);
691 memcpy(chcr_req->key_ctx.key +
692 (ablkctx->enckey_len >> 1),
693 ablkctx->key,
694 ablkctx->enckey_len >> 1);
695 }
696 }
697 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
698 sg_param.nents = reqctx->dst_nents;
699 sg_param.obsize = wrparam->bytes;
700 sg_param.qid = wrparam->qid;
701 error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
702 reqctx->dst, &sg_param);
703 if (error)
704 goto map_fail1;
705
706 skb_set_transport_header(skb, transhdr_len);
707 write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
708 write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes);
709 atomic_inc(&adap->chcr_stats.cipher_rqst);
710 create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1,
711 sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
712 ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
713 reqctx->skb = skb;
714 skb_get(skb);
715 return skb;
716map_fail1:
717 kfree_skb(skb);
718err:
719 return ERR_PTR(error);
720}
721
722static inline int chcr_keyctx_ck_size(unsigned int keylen)
723{
724 int ck_size = 0;
725
726 if (keylen == AES_KEYSIZE_128)
727 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
728 else if (keylen == AES_KEYSIZE_192)
729 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
730 else if (keylen == AES_KEYSIZE_256)
731 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
732 else
733 ck_size = 0;
734
735 return ck_size;
736}
737static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
738 const u8 *key,
739 unsigned int keylen)
740{
741 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
742 struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
743 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
744 int err = 0;
745
746 crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
747 crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
748 CRYPTO_TFM_REQ_MASK);
749 err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
750 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
751 tfm->crt_flags |=
752 crypto_skcipher_get_flags(ablkctx->sw_cipher) &
753 CRYPTO_TFM_RES_MASK;
754 return err;
755}
756
757static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
758 const u8 *key,
759 unsigned int keylen)
760{
761 struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
762 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
763 unsigned int ck_size, context_size;
764 u16 alignment = 0;
765 int err;
766
767 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
768 if (err)
769 goto badkey_err;
770
771 ck_size = chcr_keyctx_ck_size(keylen);
772 alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
773 memcpy(ablkctx->key, key, keylen);
774 ablkctx->enckey_len = keylen;
775 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
776 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
777 keylen + alignment) >> 4;
778
779 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
780 0, 0, context_size);
781 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
782 return 0;
783badkey_err:
784 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
785 ablkctx->enckey_len = 0;
786
787 return err;
788}
789
790static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
791 const u8 *key,
792 unsigned int keylen)
793{
794 struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
795 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
796 unsigned int ck_size, context_size;
797 u16 alignment = 0;
798 int err;
799
800 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
801 if (err)
802 goto badkey_err;
803 ck_size = chcr_keyctx_ck_size(keylen);
804 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
805 memcpy(ablkctx->key, key, keylen);
806 ablkctx->enckey_len = keylen;
807 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
808 keylen + alignment) >> 4;
809
810 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
811 0, 0, context_size);
812 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
813
814 return 0;
815badkey_err:
816 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
817 ablkctx->enckey_len = 0;
818
819 return err;
820}
821
822static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
823 const u8 *key,
824 unsigned int keylen)
825{
826 struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
827 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
828 unsigned int ck_size, context_size;
829 u16 alignment = 0;
830 int err;
831
832 if (keylen < CTR_RFC3686_NONCE_SIZE)
833 return -EINVAL;
834 memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
835 CTR_RFC3686_NONCE_SIZE);
836
837 keylen -= CTR_RFC3686_NONCE_SIZE;
838 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
839 if (err)
840 goto badkey_err;
841
842 ck_size = chcr_keyctx_ck_size(keylen);
843 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
844 memcpy(ablkctx->key, key, keylen);
845 ablkctx->enckey_len = keylen;
846 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
847 keylen + alignment) >> 4;
848
849 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
850 0, 0, context_size);
851 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
852
853 return 0;
854badkey_err:
855 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
856 ablkctx->enckey_len = 0;
857
858 return err;
859}
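/* Copy srciv into dstiv and add 'add' to the big-endian counter in its last
 * 32-bit word, propagating any carry into the more significant words.
 */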
860static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
861{
862 unsigned int size = AES_BLOCK_SIZE;
863 __be32 *b = (__be32 *)(dstiv + size);
864 u32 c, prev;
865
866 memcpy(dstiv, srciv, AES_BLOCK_SIZE);
867 for (; size >= 4; size -= 4) {
868 prev = be32_to_cpu(*--b);
869 c = prev + add;
870 *b = cpu_to_be32(c);
871 if (prev < c)
872 break;
873 add = 1;
874 }
875
876}
877
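/* Clamp the byte count so the low 32-bit counter word of the IV cannot wrap
 * within a single request.
 */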
878static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
879{
880 __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
881 u64 c;
882 u32 temp = be32_to_cpu(*--b);
883
884 temp = ~temp;
885 c = (u64)temp + 1; // Number of blocks that can be processed without overflow
886 if ((bytes / AES_BLOCK_SIZE) > c)
887 bytes = c * AES_BLOCK_SIZE;
888 return bytes;
889}
890
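/* Recompute the XTS tweak for the data already processed: encrypt the IV with
 * the second half of the key, multiply by x in GF(2^128) once per processed
 * block, then decrypt so the hardware is handed a plain IV it can re-encrypt.
 */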
891static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
892{
893 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
894 struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
895 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
896 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
897 struct crypto_cipher *cipher;
898 int ret, i;
899 u8 *key;
900 unsigned int keylen;
901
902 cipher = crypto_alloc_cipher("aes-generic", 0, 0);
903 memcpy(iv, req->info, AES_BLOCK_SIZE);
904
905 if (IS_ERR(cipher)) {
906 ret = -ENOMEM;
907 goto out;
908 }
909 keylen = ablkctx->enckey_len / 2;
910 key = ablkctx->key + keylen;
911 ret = crypto_cipher_setkey(cipher, key, keylen);
912 if (ret)
913 goto out1;
914
915 crypto_cipher_encrypt_one(cipher, iv, iv);
916 for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++)
917 gf128mul_x_ble((le128 *)iv, (le128 *)iv);
918
919 crypto_cipher_decrypt_one(cipher, iv, iv);
920out1:
921 crypto_free_cipher(cipher);
922out:
923 return ret;
924}
925
926static int chcr_update_cipher_iv(struct ablkcipher_request *req,
927 struct cpl_fw6_pld *fw6_pld, u8 *iv)
928{
929 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
930 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
931 int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
932 int ret = 0;
933
934 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
935 ctr_add_iv(iv, req->info, (reqctx->processed /
936 AES_BLOCK_SIZE));
937 else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
938 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
939 CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
940 AES_BLOCK_SIZE) + 1);
941 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
942 ret = chcr_update_tweak(req, iv);
943 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
944 if (reqctx->op)
945 sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
946 16,
947 reqctx->processed - AES_BLOCK_SIZE);
948 else
949 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
950 }
951
952 return ret;
953
954}
955
956/* We need a separate function for the final IV because in RFC3686 the initial
957 * counter starts from 1 and the IV buffer is only 8 bytes, which remains
958 * constant for subsequent update requests.
959 */
960
961static int chcr_final_cipher_iv(struct ablkcipher_request *req,
962 struct cpl_fw6_pld *fw6_pld, u8 *iv)
963{
964 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
965 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
966 int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
967 int ret = 0;
968
969 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
970 ctr_add_iv(iv, req->info, (reqctx->processed /
971 AES_BLOCK_SIZE));
972 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
973 ret = chcr_update_tweak(req, iv);
974 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
975 if (reqctx->op)
976 sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
977 16,
978 reqctx->processed - AES_BLOCK_SIZE);
979 else
980 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
981
982 }
983 return ret;
984
985}
986
987
988static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
989 unsigned char *input, int err)
990{
991 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
992 struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
993 struct uld_ctx *u_ctx = ULD_CTX(ctx);
994 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
995 struct sk_buff *skb;
996 struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
997 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
998 struct cipher_wr_param wrparam;
999 int bytes;
1000
1001 dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents,
1002 DMA_FROM_DEVICE);
1003
1004 if (reqctx->skb) {
1005 kfree_skb(reqctx->skb);
1006 reqctx->skb = NULL;
1007 }
1008 if (err)
1009 goto complete;
1010
1011 if (req->nbytes == reqctx->processed) {
1012 err = chcr_final_cipher_iv(req, fw6_pld, req->info);
1013 goto complete;
1014 }
1015
1016 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1017 ctx->tx_qidx))) {
1018 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1019 err = -EBUSY;
1020 goto complete;
1021 }
1022
1023 }
1024 wrparam.srcsg = scatterwalk_ffwd(reqctx->srcffwd, req->src,
1025 reqctx->processed);
1026 reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg,
1027 reqctx->processed);
1028 if (!wrparam.srcsg || !reqctx->dst) {
1029 pr_err("Input sg list length less than nbytes\n");
1030 err = -EINVAL;
1031 goto complete;
1032 }
1033 bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1,
1034 SPACE_LEFT(ablkctx->enckey_len),
1035 &wrparam.snent, &reqctx->dst_nents);
1036 if ((bytes + reqctx->processed) >= req->nbytes)
1037 bytes = req->nbytes - reqctx->processed;
1038 else
1039 bytes = ROUND_16(bytes);
1040 err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1041 if (err)
1042 goto complete;
1043
1044 if (unlikely(bytes == 0)) {
1045 err = chcr_cipher_fallback(ablkctx->sw_cipher,
1046 req->base.flags,
1047 wrparam.srcsg,
1048 reqctx->dst,
1049 req->nbytes - reqctx->processed,
1050 reqctx->iv,
1051 reqctx->op);
1052 goto complete;
1053 }
1054
1055 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1056 CRYPTO_ALG_SUB_TYPE_CTR)
1057 bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1058 reqctx->processed += bytes;
1059 wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
1060 wrparam.req = req;
1061 wrparam.bytes = bytes;
1062 skb = create_cipher_wr(&wrparam);
1063 if (IS_ERR(skb)) {
1064 pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
1065 err = PTR_ERR(skb);
1066 goto complete;
1067 }
1068 skb->dev = u_ctx->lldi.ports[0];
1069 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1070 chcr_send_wr(skb);
1071 return 0;
1072complete:
1073 free_new_sg(reqctx->newdstsg);
1074 reqctx->newdstsg = NULL;
1075 req->base.complete(&req->base, err);
1076 return err;
1077}
1078
1079static int process_cipher(struct ablkcipher_request *req,
1080 unsigned short qid,
1081 struct sk_buff **skb,
1082 unsigned short op_type)
1083{
1084 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1085 unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
1086 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1087 struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1088 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1089 struct cipher_wr_param wrparam;
1090 int bytes, nents, err = -EINVAL;
1091
1092 reqctx->newdstsg = NULL;
1093 reqctx->processed = 0;
1094 if (!req->info)
1095 goto error;
1096 if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1097 (req->nbytes == 0) ||
1098 (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
1099 pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1100 ablkctx->enckey_len, req->nbytes, ivsize);
1101 goto error;
1102 }
1103 wrparam.srcsg = req->src;
1104 if (is_newsg(req->dst, &nents)) {
1105 reqctx->newdstsg = alloc_new_sg(req->dst, nents);
1106 if (IS_ERR(reqctx->newdstsg))
1107 return PTR_ERR(reqctx->newdstsg);
1108 reqctx->dstsg = reqctx->newdstsg;
1109 } else {
1110 reqctx->dstsg = req->dst;
1111 }
1112 bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
1113 SPACE_LEFT(ablkctx->enckey_len),
1114 &wrparam.snent,
1115 &reqctx->dst_nents);
1116 if ((bytes + reqctx->processed) >= req->nbytes)
1117 bytes = req->nbytes - reqctx->processed;
1118 else
1119 bytes = ROUND_16(bytes);
1120 if (unlikely(bytes > req->nbytes))
1121 bytes = req->nbytes;
1122 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1123 CRYPTO_ALG_SUB_TYPE_CTR) {
1124 bytes = adjust_ctr_overflow(req->info, bytes);
1125 }
1126 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1127 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1128 memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1129 memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
1130 CTR_RFC3686_IV_SIZE);
1131
1132 /* initialize counter portion of counter block */
1133 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1134 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1135
1136 } else {
1137
1138 memcpy(reqctx->iv, req->info, ivsize);
1139 }
1140 if (unlikely(bytes == 0)) {
1141 err = chcr_cipher_fallback(ablkctx->sw_cipher,
1142 req->base.flags,
1143 req->src,
1144 req->dst,
1145 req->nbytes,
1146 req->info,
1147 op_type);
1148 goto error;
1149 }
1150 reqctx->processed = bytes;
1151 reqctx->dst = reqctx->dstsg;
1152 reqctx->op = op_type;
1153 wrparam.qid = qid;
1154 wrparam.req = req;
1155 wrparam.bytes = bytes;
1156 *skb = create_cipher_wr(&wrparam);
1157 if (IS_ERR(*skb)) {
1158 err = PTR_ERR(*skb);
1159 goto error;
1160 }
1161
1162 return 0;
1163error:
1164 free_new_sg(reqctx->newdstsg);
1165 reqctx->newdstsg = NULL;
1166 return err;
1167}
1168
1169static int chcr_aes_encrypt(struct ablkcipher_request *req)
1170{
1171 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1172 struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1173 struct sk_buff *skb = NULL;
1174 int err;
1175 struct uld_ctx *u_ctx = ULD_CTX(ctx);
1176
1177 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1178 ctx->tx_qidx))) {
1179 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1180 return -EBUSY;
1181 }
1182
1183 err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
1184 CHCR_ENCRYPT_OP);
1185 if (err || !skb)
1186 return err;
1187 skb->dev = u_ctx->lldi.ports[0];
1188 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1189 chcr_send_wr(skb);
1190 return -EINPROGRESS;
1191}
1192
1193static int chcr_aes_decrypt(struct ablkcipher_request *req)
1194{
1195 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1196 struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1197 struct uld_ctx *u_ctx = ULD_CTX(ctx);
1198 struct sk_buff *skb = NULL;
1199 int err;
1200
1201 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1202 ctx->tx_qidx))) {
1203 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1204 return -EBUSY;
1205 }
1206
1207 err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
1208 CHCR_DECRYPT_OP);
1209 if (err || !skb)
1210 return err;
1211 skb->dev = u_ctx->lldi.ports[0];
1212 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1213 chcr_send_wr(skb);
1214 return -EINPROGRESS;
1215}
1216
1217static int chcr_device_init(struct chcr_context *ctx)
1218{
1219 struct uld_ctx *u_ctx;
1220 struct adapter *adap;
1221 unsigned int id;
1222 int txq_perchan, txq_idx, ntxq;
1223 int err = 0, rxq_perchan, rxq_idx;
1224
1225 id = smp_processor_id();
1226 if (!ctx->dev) {
1227 err = assign_chcr_device(&ctx->dev);
1228 if (err) {
1229 pr_err("chcr device assignment fails\n");
1230 goto out;
1231 }
1232 u_ctx = ULD_CTX(ctx);
1233 adap = padap(ctx->dev);
1234 ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
1235 adap->vres.ncrypto_fc);
1236 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1237 txq_perchan = ntxq / u_ctx->lldi.nchan;
1238 rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
1239 rxq_idx += id % rxq_perchan;
1240 txq_idx = ctx->dev->tx_channel_id * txq_perchan;
1241 txq_idx += id % txq_perchan;
1242 spin_lock(&ctx->dev->lock_chcr_dev);
1243 ctx->rx_qidx = rxq_idx;
1244 ctx->tx_qidx = txq_idx;
1245 ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1246 ctx->dev->rx_channel_id = 0;
1247 spin_unlock(&ctx->dev->lock_chcr_dev);
1248 }
1249out:
1250 return err;
1251}
1252
1253static int chcr_cra_init(struct crypto_tfm *tfm)
1254{
1255 struct crypto_alg *alg = tfm->__crt_alg;
1256 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1257 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1258
1259 ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
1260 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1261 if (IS_ERR(ablkctx->sw_cipher)) {
1262 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1263 return PTR_ERR(ablkctx->sw_cipher);
1264 }
1265 tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1266 return chcr_device_init(crypto_tfm_ctx(tfm));
1267}
1268
1269static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1270{
1271 struct crypto_alg *alg = tfm->__crt_alg;
1272 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1273 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1274
1275 /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
1276 * cannot be used as fallback in chcr_handle_cipher_response
1277 */
1278 ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1279 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1280 if (IS_ERR(ablkctx->sw_cipher)) {
1281 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1282 return PTR_ERR(ablkctx->sw_cipher);
1283 }
1284 tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1285 return chcr_device_init(crypto_tfm_ctx(tfm));
1286}
1287
1288
1289static void chcr_cra_exit(struct crypto_tfm *tfm)
1290{
1291 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1292 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1293
1294 crypto_free_skcipher(ablkctx->sw_cipher);
1295}
1296
1297static int get_alg_config(struct algo_param *params,
1298 unsigned int auth_size)
1299{
1300 switch (auth_size) {
1301 case SHA1_DIGEST_SIZE:
1302 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1303 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1304 params->result_size = SHA1_DIGEST_SIZE;
1305 break;
1306 case SHA224_DIGEST_SIZE:
1307 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1308 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1309 params->result_size = SHA256_DIGEST_SIZE;
1310 break;
1311 case SHA256_DIGEST_SIZE:
1312 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1313 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1314 params->result_size = SHA256_DIGEST_SIZE;
1315 break;
1316 case SHA384_DIGEST_SIZE:
1317 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1318 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1319 params->result_size = SHA512_DIGEST_SIZE;
1320 break;
1321 case SHA512_DIGEST_SIZE:
1322 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1323 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1324 params->result_size = SHA512_DIGEST_SIZE;
1325 break;
1326 default:
1327 pr_err("chcr : ERROR, unsupported digest size\n");
1328 return -EINVAL;
1329 }
1330 return 0;
1331}
1332
1333static inline void chcr_free_shash(struct crypto_shash *base_hash)
1334{
1335 crypto_free_shash(base_hash);
1336}
1337
1338/**
1339 * create_hash_wr - Create hash work request
1340 * @req - Hash request base
1341 */
1342static struct sk_buff *create_hash_wr(struct ahash_request *req,
1343 struct hash_wr_param *param)
1344{
1345 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1346 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1347 struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1348 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1349 struct sk_buff *skb = NULL;
1350 struct chcr_wr *chcr_req;
1351 unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
1352 unsigned int digestsize = crypto_ahash_digestsize(tfm);
1353 unsigned int kctx_len = 0;
1354 u8 hash_size_in_response = 0;
1355 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1356 GFP_ATOMIC;
1357 struct adapter *adap = padap(ctx->dev);
1358
1359 iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
1360 kctx_len = param->alg_prm.result_size + iopad_alignment;
1361 if (param->opad_needed)
1362 kctx_len += param->alg_prm.result_size + iopad_alignment;
1363
1364 if (req_ctx->result)
1365 hash_size_in_response = digestsize;
1366 else
1367 hash_size_in_response = param->alg_prm.result_size;
1368 transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
1369 skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1370 if (!skb)
1371 return skb;
1372
1373 skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1374 chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
1375 memset(chcr_req, 0, transhdr_len);
1376
1377 chcr_req->sec_cpl.op_ivinsrtofst =
1378 FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
1379 chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1380
1381 chcr_req->sec_cpl.aadstart_cipherstop_hi =
1382 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1383 chcr_req->sec_cpl.cipherstop_lo_authinsert =
1384 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1385 chcr_req->sec_cpl.seqno_numivs =
1386 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1387 param->opad_needed, 0);
1388
1389 chcr_req->sec_cpl.ivgen_hdrlen =
1390 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1391
1392 memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1393 param->alg_prm.result_size);
1394
1395 if (param->opad_needed)
1396 memcpy(chcr_req->key_ctx.key +
1397 ((param->alg_prm.result_size <= 32) ? 32 :
1398 CHCR_HASH_MAX_DIGEST_SIZE),
1399 hmacctx->opad, param->alg_prm.result_size);
1400
1401 chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1402 param->alg_prm.mk_size, 0,
1403 param->opad_needed,
1404 ((kctx_len +
1405 sizeof(chcr_req->key_ctx)) >> 4));
1406 chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1407
1408 skb_set_transport_header(skb, transhdr_len);
1409 if (param->bfr_len != 0)
1410 write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
1411 param->bfr_len);
1412 if (param->sg_len != 0)
1413 write_sg_to_skb(skb, &frags, req->src, param->sg_len);
1414 atomic_inc(&adap->chcr_stats.digest_rqst);
1415 create_wreq(ctx, chcr_req, &req->base, skb, kctx_len,
1416 hash_size_in_response, 0, DUMMY_BYTES, 0);
1417 req_ctx->skb = skb;
1418 skb_get(skb);
1419 return skb;
1420}
1421
1422static int chcr_ahash_update(struct ahash_request *req)
1423{
1424 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1425 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1426 struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1427 struct uld_ctx *u_ctx = NULL;
1428 struct sk_buff *skb;
1429 u8 remainder = 0, bs;
1430 unsigned int nbytes = req->nbytes;
1431 struct hash_wr_param params;
1432
1433 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1434
1435 u_ctx = ULD_CTX(ctx);
1436 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1437 ctx->tx_qidx))) {
1438 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1439 return -EBUSY;
1440 }
1441
1442 if (nbytes + req_ctx->reqlen >= bs) {
1443 remainder = (nbytes + req_ctx->reqlen) % bs;
1444 nbytes = nbytes + req_ctx->reqlen - remainder;
1445 } else {
1446 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1447 + req_ctx->reqlen, nbytes, 0);
1448 req_ctx->reqlen += nbytes;
1449 return 0;
1450 }
1451
1452 params.opad_needed = 0;
1453 params.more = 1;
1454 params.last = 0;
1455 params.sg_len = nbytes - req_ctx->reqlen;
1456 params.bfr_len = req_ctx->reqlen;
1457 params.scmd1 = 0;
1458 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1459 req_ctx->result = 0;
1460 req_ctx->data_len += params.sg_len + params.bfr_len;
1461 skb = create_hash_wr(req, &params);
1462 if (!skb)
1463 return -ENOMEM;
1464
1465 if (remainder) {
1466 u8 *temp;
1467 /* Swap buffers */
1468 temp = req_ctx->reqbfr;
1469 req_ctx->reqbfr = req_ctx->skbfr;
1470 req_ctx->skbfr = temp;
1471 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1472 req_ctx->reqbfr, remainder, req->nbytes -
1473 remainder);
1474 }
1475 req_ctx->reqlen = remainder;
1476 skb->dev = u_ctx->lldi.ports[0];
1477 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1478 chcr_send_wr(skb);
1479
1480 return -EINPROGRESS;
1481}
1482
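/* Build the final padding block used when there is no more data: a 0x80
 * terminator followed by the total length in bits placed at the end of the
 * block (offset 56 for 64-byte blocks, offset 120 for 128-byte blocks).
 */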
1483static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1484{
1485 memset(bfr_ptr, 0, bs);
1486 *bfr_ptr = 0x80;
1487 if (bs == 64)
1488 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1489 else
1490 *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1491}
1492
1493static int chcr_ahash_final(struct ahash_request *req)
1494{
1495 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1496 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1497 struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1498 struct hash_wr_param params;
1499 struct sk_buff *skb;
1500 struct uld_ctx *u_ctx = NULL;
1501 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1502
1503 u_ctx = ULD_CTX(ctx);
1504 if (is_hmac(crypto_ahash_tfm(rtfm)))
1505 params.opad_needed = 1;
1506 else
1507 params.opad_needed = 0;
1508 params.sg_len = 0;
1509 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1510 req_ctx->result = 1;
1511 params.bfr_len = req_ctx->reqlen;
1512 req_ctx->data_len += params.bfr_len + params.sg_len;
1513 if (req_ctx->reqlen == 0) {
1514 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1515 params.last = 0;
1516 params.more = 1;
1517 params.scmd1 = 0;
1518 params.bfr_len = bs;
1519
1520 } else {
1521 params.scmd1 = req_ctx->data_len;
1522 params.last = 1;
1523 params.more = 0;
1524 }
1525 skb = create_hash_wr(req, &params);
1526 if (!skb)
1527 return -ENOMEM;
1528
1529 skb->dev = u_ctx->lldi.ports[0];
1530 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1531 chcr_send_wr(skb);
1532 return -EINPROGRESS;
1533}
1534
1535static int chcr_ahash_finup(struct ahash_request *req)
1536{
1537 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1538 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1539 struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1540 struct uld_ctx *u_ctx = NULL;
1541 struct sk_buff *skb;
1542 struct hash_wr_param params;
1543 u8 bs;
1544
1545 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1546 u_ctx = ULD_CTX(ctx);
1547
1548 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1549 ctx->tx_qidx))) {
1550 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1551 return -EBUSY;
1552 }
1553
1554 if (is_hmac(crypto_ahash_tfm(rtfm)))
1555 params.opad_needed = 1;
1556 else
1557 params.opad_needed = 0;
1558
1559 params.sg_len = req->nbytes;
1560 params.bfr_len = req_ctx->reqlen;
1561 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1562 req_ctx->data_len += params.bfr_len + params.sg_len;
1563 req_ctx->result = 1;
1564 if ((req_ctx->reqlen + req->nbytes) == 0) {
1565 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1566 params.last = 0;
1567 params.more = 1;
1568 params.scmd1 = 0;
1569 params.bfr_len = bs;
1570 } else {
1571 params.scmd1 = req_ctx->data_len;
1572 params.last = 1;
1573 params.more = 0;
1574 }
1575
1576 skb = create_hash_wr(req, &params);
1577 if (!skb)
1578 return -ENOMEM;
1579
1580 skb->dev = u_ctx->lldi.ports[0];
1581 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1582 chcr_send_wr(skb);
1583
1584 return -EINPROGRESS;
1585}
1586
1587static int chcr_ahash_digest(struct ahash_request *req)
1588{
1589 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1590 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1591 struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1592 struct uld_ctx *u_ctx = NULL;
1593 struct sk_buff *skb;
1594 struct hash_wr_param params;
1595 u8 bs;
1596
1597 rtfm->init(req);
1598 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1599
1600 u_ctx = ULD_CTX(ctx);
1601 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1602 ctx->tx_qidx))) {
1603 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1604 return -EBUSY;
1605 }
1606
1607 if (is_hmac(crypto_ahash_tfm(rtfm)))
1608 params.opad_needed = 1;
1609 else
1610 params.opad_needed = 0;
1611
1612 params.last = 0;
1613 params.more = 0;
1614 params.sg_len = req->nbytes;
1615 params.bfr_len = 0;
1616 params.scmd1 = 0;
1617 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1618 req_ctx->result = 1;
1619 req_ctx->data_len += params.bfr_len + params.sg_len;
1620
1621 if (req->nbytes == 0) {
1622 create_last_hash_block(req_ctx->reqbfr, bs, 0);
1623 params.more = 1;
1624 params.bfr_len = bs;
1625 }
1626
1627 skb = create_hash_wr(req, &params);
1628 if (!skb)
1629 return -ENOMEM;
1630
1631 skb->dev = u_ctx->lldi.ports[0];
1632 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1633 chcr_send_wr(skb);
1634 return -EINPROGRESS;
1635}
1636
1637static int chcr_ahash_export(struct ahash_request *areq, void *out)
1638{
1639 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1640 struct chcr_ahash_req_ctx *state = out;
1641
1642 state->reqlen = req_ctx->reqlen;
1643 state->data_len = req_ctx->data_len;
1644 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
1645 memcpy(state->partial_hash, req_ctx->partial_hash,
1646 CHCR_HASH_MAX_DIGEST_SIZE);
1647 return 0;
1648}
1649
1650static int chcr_ahash_import(struct ahash_request *areq, const void *in)
1651{
1652 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1653 struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
1654
1655 req_ctx->reqlen = state->reqlen;
1656 req_ctx->data_len = state->data_len;
1657 req_ctx->reqbfr = req_ctx->bfr1;
1658 req_ctx->skbfr = req_ctx->bfr2;
1659 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
1660 memcpy(req_ctx->partial_hash, state->partial_hash,
1661 CHCR_HASH_MAX_DIGEST_SIZE);
1662 return 0;
1663}
1664
1665static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1666 unsigned int keylen)
1667{
1668 struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1669 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1670 unsigned int digestsize = crypto_ahash_digestsize(tfm);
1671 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1672 unsigned int i, err = 0, updated_digestsize;
1673
e7922729
HJ
1674 SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
1675
1676 /* use the key to calculate the ipad and opad. ipad will be sent with the
324429d7
HS
1677 * first request's data. opad will be sent with the final hash result.
1678 * ipad is stored at hmacctx->ipad and opad at hmacctx->opad.
1679 */
e7922729
HJ
1680 shash->tfm = hmacctx->base_hash;
1681 shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
324429d7 1682 if (keylen > bs) {
e7922729 1683 err = crypto_shash_digest(shash, key, keylen,
324429d7
HS
1684 hmacctx->ipad);
1685 if (err)
1686 goto out;
1687 keylen = digestsize;
1688 } else {
1689 memcpy(hmacctx->ipad, key, keylen);
1690 }
1691 memset(hmacctx->ipad + keylen, 0, bs - keylen);
1692 memcpy(hmacctx->opad, hmacctx->ipad, bs);
1693
1694 for (i = 0; i < bs / sizeof(int); i++) {
1695 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
1696 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
1697 }
1698
1699 updated_digestsize = digestsize;
1700 if (digestsize == SHA224_DIGEST_SIZE)
1701 updated_digestsize = SHA256_DIGEST_SIZE;
1702 else if (digestsize == SHA384_DIGEST_SIZE)
1703 updated_digestsize = SHA512_DIGEST_SIZE;
e7922729 1704 err = chcr_compute_partial_hash(shash, hmacctx->ipad,
324429d7
HS
1705 hmacctx->ipad, digestsize);
1706 if (err)
1707 goto out;
1708 chcr_change_order(hmacctx->ipad, updated_digestsize);
1709
e7922729 1710 err = chcr_compute_partial_hash(shash, hmacctx->opad,
324429d7
HS
1711 hmacctx->opad, digestsize);
1712 if (err)
1713 goto out;
1714 chcr_change_order(hmacctx->opad, updated_digestsize);
1715out:
1716 return err;
1717}
1718
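/*
 * Illustrative sketch of what the setkey above precomputes (RFC 2104):
 *
 *   k'   = key, zero padded (or first hashed, if longer than bs) to bs bytes
 *   ipad = k' ^ 0x36..36          opad = k' ^ 0x5c..5c
 *
 * One compression round is then run over each pad and the results cached,
 * so per-request HMAC only has to hash the message itself.  IPAD_DATA and
 * OPAD_DATA are assumed here to be the usual repeated 0x36/0x5c words; see
 * the driver headers for the actual definitions.
 */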
b8fd1f41 1719static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
324429d7
HS
1720 unsigned int key_len)
1721{
b8fd1f41 1722 struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
324429d7 1723 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
324429d7 1724 unsigned short context_size = 0;
b8fd1f41 1725 int err;
324429d7 1726
b8fd1f41
HJ
1727 err = chcr_cipher_fallback_setkey(cipher, key, key_len);
1728 if (err)
1729 goto badkey_err;
cc1b156d
HJ
1730
1731 memcpy(ablkctx->key, key, key_len);
1732 ablkctx->enckey_len = key_len;
1733 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
1734 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
1735 ablkctx->key_ctx_hdr =
1736 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
1737 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
1738 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
1739 CHCR_KEYCTX_NO_KEY, 1,
1740 0, context_size);
1741 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
1742 return 0;
b8fd1f41
HJ
1743badkey_err:
1744 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1745 ablkctx->enckey_len = 0;
1746
1747 return err;
324429d7
HS
1748}
1749
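/*
 * Note (illustrative): for XTS, key_len covers both the data-unit key and
 * the tweak key, so a 32-byte key means AES-128-XTS and a 64-byte key means
 * AES-256-XTS; that is why AES_KEYSIZE_256 maps to the 128-bit key-size
 * encoding in the key-context header above.
 */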
1750static int chcr_sha_init(struct ahash_request *areq)
1751{
1752 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1753 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1754 int digestsize = crypto_ahash_digestsize(tfm);
1755
1756 req_ctx->data_len = 0;
44fce12a
HJ
1757 req_ctx->reqlen = 0;
1758 req_ctx->reqbfr = req_ctx->bfr1;
1759 req_ctx->skbfr = req_ctx->bfr2;
324429d7
HS
1760 req_ctx->skb = NULL;
1761 req_ctx->result = 0;
1762 copy_hash_init_values(req_ctx->partial_hash, digestsize);
1763 return 0;
1764}
1765
1766static int chcr_sha_cra_init(struct crypto_tfm *tfm)
1767{
1768 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1769 sizeof(struct chcr_ahash_req_ctx));
1770 return chcr_device_init(crypto_tfm_ctx(tfm));
1771}
1772
1773static int chcr_hmac_init(struct ahash_request *areq)
1774{
1775 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1776 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
1777 struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1778 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1779 unsigned int digestsize = crypto_ahash_digestsize(rtfm);
1780 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1781
1782 chcr_sha_init(areq);
1783 req_ctx->data_len = bs;
1784 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1785 if (digestsize == SHA224_DIGEST_SIZE)
1786 memcpy(req_ctx->partial_hash, hmacctx->ipad,
1787 SHA256_DIGEST_SIZE);
1788 else if (digestsize == SHA384_DIGEST_SIZE)
1789 memcpy(req_ctx->partial_hash, hmacctx->ipad,
1790 SHA512_DIGEST_SIZE);
1791 else
1792 memcpy(req_ctx->partial_hash, hmacctx->ipad,
1793 digestsize);
1794 }
1795 return 0;
1796}
1797
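/*
 * Note (illustrative): HMAC init seeds partial_hash with the ipad digest
 * cached by chcr_ahash_setkey() and starts data_len at one block, so the
 * hardware continues the inner hash as if the ipad block had already been
 * consumed.
 */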
1798static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
1799{
1800 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1801 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1802 unsigned int digestsize =
1803 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
1804
1805 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1806 sizeof(struct chcr_ahash_req_ctx));
e7922729
HJ
1807 hmacctx->base_hash = chcr_alloc_shash(digestsize);
1808 if (IS_ERR(hmacctx->base_hash))
1809 return PTR_ERR(hmacctx->base_hash);
324429d7
HS
1810 return chcr_device_init(crypto_tfm_ctx(tfm));
1811}
1812
324429d7
HS
1813static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
1814{
1815 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1816 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1817
e7922729
HJ
1818 if (hmacctx->base_hash) {
1819 chcr_free_shash(hmacctx->base_hash);
1820 hmacctx->base_hash = NULL;
324429d7
HS
1821 }
1822}
1823
738bff48
HJ
1824static int is_newsg(struct scatterlist *sgl, unsigned int *newents)
1825{
1826 int nents = 0;
1827 int ret = 0;
1828
1829 while (sgl) {
1830 if (sgl->length > CHCR_SG_SIZE)
1831 ret = 1;
1832 nents += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE);
1833 sgl = sg_next(sgl);
1834 }
1835 *newents = nents;
1836 return ret;
1837}
1838
1839static inline void free_new_sg(struct scatterlist *sgl)
1840{
1841 kfree(sgl);
1842}
1843
1844static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
1845 unsigned int nents)
1846{
1847 struct scatterlist *newsg, *sg;
1848 int i, len, processed = 0;
1849 struct page *spage;
1850 int offset;
1851
1852 newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL);
1853 if (!newsg)
1854 return ERR_PTR(-ENOMEM);
1855 sg = newsg;
1856 sg_init_table(sg, nents);
1857 offset = sgl->offset;
1858 spage = sg_page(sgl);
1859 for (i = 0; i < nents; i++) {
1860 len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE);
1861 sg_set_page(sg, spage, len, offset);
1862 processed += len;
1863 offset += len;
1864 if (offset >= PAGE_SIZE) {
1865 offset = offset % PAGE_SIZE;
1866 spage++;
1867 }
1868 if (processed == sgl->length) {
1869 processed = 0;
1870 sgl = sg_next(sgl);
1871 if (!sgl)
1872 break;
1873 spage = sg_page(sgl);
1874 offset = sgl->offset;
1875 }
1876 sg = sg_next(sg);
1877 }
1878 return newsg;
1879}
1880
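/*
 * Illustrative example for the two helpers above: a destination entry larger
 * than CHCR_SG_SIZE is split into CHCR_SG_SIZE-sized pieces before being
 * handed to the DSGL writer.  Assuming CHCR_SG_SIZE were 2048, a single
 * 5000-byte entry would become three entries of 2048, 2048 and 904 bytes,
 * advancing page by page from the original offset.
 */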
2debd332
HJ
1881static int chcr_copy_assoc(struct aead_request *req,
1882 struct chcr_aead_ctx *ctx)
1883{
1884 SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
1885
1886 skcipher_request_set_tfm(skreq, ctx->null);
1887 skcipher_request_set_callback(skreq, aead_request_flags(req),
1888 NULL, NULL);
1889 skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
1890 NULL);
1891
1892 return crypto_skcipher_encrypt(skreq);
1893}
0e93708d
HJ
1894static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
1895 int aadmax, int wrlen,
1896 unsigned short op_type)
1897{
1898 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
1899
1900 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
1901 (req->assoclen > aadmax) ||
1902 (src_nent > MAX_SKB_FRAGS) ||
1903 (wrlen > MAX_WR_SIZE))
1904 return 1;
1905 return 0;
1906}
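/*
 * Note (illustrative): a request is bounced to the software AEAD via
 * chcr_aead_fallback() whenever the effective payload is empty, the AAD
 * exceeds the adapter limit, the source has more fragments than an skb can
 * carry, or the resulting work request would not fit in MAX_WR_SIZE.
 */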
2debd332 1907
0e93708d
HJ
1908static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
1909{
1910 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1911 struct chcr_context *ctx = crypto_aead_ctx(tfm);
1912 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1913 struct aead_request *subreq = aead_request_ctx(req);
1914
1915 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
1916 aead_request_set_callback(subreq, req->base.flags,
1917 req->base.complete, req->base.data);
1918 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
1919 req->iv);
1920 aead_request_set_ad(subreq, req->assoclen);
1921 return op_type ? crypto_aead_decrypt(subreq) :
1922 crypto_aead_encrypt(subreq);
1923}
2debd332
HJ
1924
1925static struct sk_buff *create_authenc_wr(struct aead_request *req,
1926 unsigned short qid,
1927 int size,
1928 unsigned short op_type)
1929{
1930 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1931 struct chcr_context *ctx = crypto_aead_ctx(tfm);
1932 struct uld_ctx *u_ctx = ULD_CTX(ctx);
1933 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1934 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
1935 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1936 struct sk_buff *skb = NULL;
1937 struct chcr_wr *chcr_req;
1938 struct cpl_rx_phys_dsgl *phys_cpl;
1939 struct phys_sge_parm sg_param;
94e1dab1 1940 struct scatterlist *src;
2debd332
HJ
1941 unsigned int frags = 0, transhdr_len;
1942 unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
738bff48 1943 unsigned int kctx_len = 0, nents;
2debd332
HJ
1944 unsigned short stop_offset = 0;
1945 unsigned int assoclen = req->assoclen;
1946 unsigned int authsize = crypto_aead_authsize(tfm);
5fe8c711 1947 int error = -EINVAL, src_nent;
2debd332
HJ
1948 int null = 0;
1949 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1950 GFP_ATOMIC;
ee0863ba 1951 struct adapter *adap = padap(ctx->dev);
2debd332 1952
738bff48
HJ
1953 reqctx->newdstsg = NULL;
1954 dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
1955 authsize);
1956 if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
2debd332
HJ
1957 goto err;
1958
1959 if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1960 goto err;
0e93708d
HJ
1961 src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
1962 if (src_nent < 0)
2debd332 1963 goto err;
94e1dab1 1964 src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
94e1dab1 1965
2debd332 1966 if (req->src != req->dst) {
5fe8c711
HJ
1967 error = chcr_copy_assoc(req, aeadctx);
1968 if (error)
1969 return ERR_PTR(error);
738bff48
HJ
1970 }
1971 if (dst_size && is_newsg(req->dst, &nents)) {
1972 reqctx->newdstsg = alloc_new_sg(req->dst, nents);
1973 if (IS_ERR(reqctx->newdstsg))
1974 return ERR_CAST(reqctx->newdstsg);
1975 reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
1976 reqctx->newdstsg, req->assoclen);
1977 } else {
1978 if (req->src == req->dst)
1979 reqctx->dst = src;
1980 else
1981 reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
1982 req->dst, req->assoclen);
2debd332
HJ
1983 }
1984 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
1985 null = 1;
1986 assoclen = 0;
1987 }
94e1dab1 1988 reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
2debd332 1989 (op_type ? -authsize : authsize));
0e93708d 1990 if (reqctx->dst_nents < 0) {
2debd332 1991 pr_err("AUTHENC:Invalid Destination sg entries\n");
5fe8c711 1992 error = -EINVAL;
2debd332
HJ
1993 goto err;
1994 }
1995 dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1996 kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
1997 - sizeof(chcr_req->key_ctx);
1998 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
0e93708d
HJ
1999 if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
2000 T6_MAX_AAD_SIZE,
2001 transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
2002 op_type)) {
ee0863ba 2003 atomic_inc(&adap->chcr_stats.fallback);
738bff48
HJ
2004 free_new_sg(reqctx->newdstsg);
2005 reqctx->newdstsg = NULL;
0e93708d
HJ
2006 return ERR_PTR(chcr_aead_fallback(req, op_type));
2007 }
2debd332 2008 skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
5fe8c711
HJ
2009 if (!skb) {
2010 error = -ENOMEM;
2debd332 2011 goto err;
5fe8c711 2012 }
2debd332
HJ
2013
2014 /* LLD is going to write the sge hdr. */
2015 skb_reserve(skb, sizeof(struct sge_opaque_hdr));
2016
2017 /* Write WR */
2018 chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
2019 memset(chcr_req, 0, transhdr_len);
2020
2021 stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2022
2023 /*
2024 * Input order is AAD, IV and Payload, where the IV should be included
2025 * as part of the authdata. All other fields should be filled according
2026 * to the hardware spec
2027 */
2028 chcr_req->sec_cpl.op_ivinsrtofst =
8a13449f 2029 FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2,
2debd332
HJ
2030 (ivsize ? (assoclen + 1) : 0));
2031 chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
2032 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2033 assoclen ? 1 : 0, assoclen,
2034 assoclen + ivsize + 1,
2035 (stop_offset & 0x1F0) >> 4);
2036 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2037 stop_offset & 0xF,
2038 null ? 0 : assoclen + ivsize + 1,
2039 stop_offset, stop_offset);
2040 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2041 (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
2042 CHCR_SCMD_CIPHER_MODE_AES_CBC,
2043 actx->auth_mode, aeadctx->hmac_ctrl,
2044 ivsize >> 1);
2045 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2046 0, 1, dst_size);
2047
2048 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2049 if (op_type == CHCR_ENCRYPT_OP)
2050 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2051 aeadctx->enckey_len);
2052 else
2053 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2054 aeadctx->enckey_len);
2055
2056 memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
2057 4), actx->h_iopad, kctx_len -
2058 (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
2059
2060 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2061 sg_param.nents = reqctx->dst_nents;
2062 sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
2063 sg_param.qid = qid;
5fe8c711
HJ
2064 error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
2065 reqctx->dst, &sg_param);
2066 if (error)
2debd332
HJ
2067 goto dstmap_fail;
2068
2069 skb_set_transport_header(skb, transhdr_len);
2070
2071 if (assoclen) {
2072 /* AAD buffer in */
2073 write_sg_to_skb(skb, &frags, req->src, assoclen);
2074
2075 }
2076 write_buffer_to_skb(skb, &frags, req->iv, ivsize);
2077 write_sg_to_skb(skb, &frags, src, req->cryptlen);
ee0863ba 2078 atomic_inc(&adap->chcr_stats.cipher_rqst);
b8fd1f41 2079 create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
2512a624 2080 sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
2debd332
HJ
2081 reqctx->skb = skb;
2082 skb_get(skb);
2083
2084 return skb;
2085dstmap_fail:
2086 /* ivmap_fail: */
2087 kfree_skb(skb);
2088err:
738bff48
HJ
2089 free_new_sg(reqctx->newdstsg);
2090 reqctx->newdstsg = NULL;
5fe8c711 2091 return ERR_PTR(error);
2debd332
HJ
2092}
2093
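/*
 * Illustrative layout of the work request built above (a sketch, not a
 * definitive hardware description): SEC_CPL header, then the key context
 * (cipher key followed by h_iopad), then the destination physical DSGL,
 * and finally the gather list carrying AAD, IV and payload in that order.
 */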
2debd332
HJ
2094static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2095{
2096 __be32 data;
2097
2098 memset(block, 0, csize);
2099 block += csize;
2100
2101 if (csize >= 4)
2102 csize = 4;
2103 else if (msglen > (unsigned int)(1 << (8 * csize)))
2104 return -EOVERFLOW;
2105
2106 data = cpu_to_be32(msglen);
2107 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2108
2109 return 0;
2110}
2111
2112static void generate_b0(struct aead_request *req,
2113 struct chcr_aead_ctx *aeadctx,
2114 unsigned short op_type)
2115{
2116 unsigned int l, lp, m;
2117 int rc;
2118 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2119 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2120 u8 *b0 = reqctx->scratch_pad;
2121
2122 m = crypto_aead_authsize(aead);
2123
2124 memcpy(b0, reqctx->iv, 16);
2125
2126 lp = b0[0];
2127 l = lp + 1;
2128
2129 /* set m, bits 3-5 */
2130 *b0 |= (8 * ((m - 2) / 2));
2131
2132 /* set adata, bit 6, if associated data is used */
2133 if (req->assoclen)
2134 *b0 |= 64;
2135 rc = set_msg_len(b0 + 16 - l,
2136 (op_type == CHCR_DECRYPT_OP) ?
2137 req->cryptlen - m : req->cryptlen, l);
2138}
2139
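/*
 * Illustrative B0 layout produced above, per RFC 3610, with L = 15 - nonce
 * length and M the tag size:
 *
 *   byte 0          flags = Adata << 6 | ((M - 2) / 2) << 3 | (L - 1)
 *   bytes 1..15-L   nonce (taken from the IV)
 *   last L bytes    message length, big endian (set_msg_len())
 */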
2140static inline int crypto_ccm_check_iv(const u8 *iv)
2141{
2142 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2143 if (iv[0] < 1 || iv[0] > 7)
2144 return -EINVAL;
2145
2146 return 0;
2147}
2148
2149static int ccm_format_packet(struct aead_request *req,
2150 struct chcr_aead_ctx *aeadctx,
2151 unsigned int sub_type,
2152 unsigned short op_type)
2153{
2154 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2155 int rc = 0;
2156
2debd332
HJ
2157 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2158 reqctx->iv[0] = 3;
2159 memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
2160 memcpy(reqctx->iv + 4, req->iv, 8);
2161 memset(reqctx->iv + 12, 0, 4);
2162 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2163 htons(req->assoclen - 8);
2164 } else {
2165 memcpy(reqctx->iv, req->iv, 16);
2166 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2167 htons(req->assoclen);
2168 }
2169 generate_b0(req, aeadctx, op_type);
2170 /* zero the ctr value */
2171 memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
2172 return rc;
2173}
2174
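/*
 * Note (illustrative): for rfc4309(ccm(aes)) the 16-byte CCM IV is rebuilt
 * as  L' = 3 | 3-byte salt from setkey | 8-byte per-request IV | zero pad,
 * and the AAD length written to the scratch pad excludes the 8 IV bytes
 * that the RFC carries inside the associated data.
 */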
2175static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2176 unsigned int dst_size,
2177 struct aead_request *req,
2178 unsigned short op_type,
2179 struct chcr_context *chcrctx)
2180{
2181 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
0a7bd30c 2182 struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2debd332
HJ
2183 unsigned int ivsize = AES_BLOCK_SIZE;
2184 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2185 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
8a13449f 2186 unsigned int c_id = chcrctx->dev->rx_channel_id;
2debd332
HJ
2187 unsigned int ccm_xtra;
2188 unsigned char tag_offset = 0, auth_offset = 0;
2debd332
HJ
2189 unsigned int assoclen;
2190
2191 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2192 assoclen = req->assoclen - 8;
2193 else
2194 assoclen = req->assoclen;
2195 ccm_xtra = CCM_B0_SIZE +
2196 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2197
2198 auth_offset = req->cryptlen ?
2199 (assoclen + ivsize + 1 + ccm_xtra) : 0;
2200 if (op_type == CHCR_DECRYPT_OP) {
2201 if (crypto_aead_authsize(tfm) != req->cryptlen)
2202 tag_offset = crypto_aead_authsize(tfm);
2203 else
2204 auth_offset = 0;
2205 }
2206
2207
2208 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2209 2, (ivsize ? (assoclen + 1) : 0) +
2210 ccm_xtra);
2211 sec_cpl->pldlen =
2212 htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
2213 /* For CCM there will always be a b0 block, so AAD start will always be 1 */
2214 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2215 1, assoclen + ccm_xtra, assoclen
2216 + ivsize + 1 + ccm_xtra, 0);
2217
2218 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2219 auth_offset, tag_offset,
2220 (op_type == CHCR_ENCRYPT_OP) ? 0 :
2221 crypto_aead_authsize(tfm));
2222 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2223 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
0a7bd30c
HJ
2224 cipher_mode, mac_mode,
2225 aeadctx->hmac_ctrl, ivsize >> 1);
2debd332
HJ
2226
2227 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2228 1, dst_size);
2229}
2230
2231int aead_ccm_validate_input(unsigned short op_type,
2232 struct aead_request *req,
2233 struct chcr_aead_ctx *aeadctx,
2234 unsigned int sub_type)
2235{
2236 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2237 if (crypto_ccm_check_iv(req->iv)) {
2238 pr_err("CCM: IV check fails\n");
2239 return -EINVAL;
2240 }
2241 } else {
2242 if (req->assoclen != 16 && req->assoclen != 20) {
2243 pr_err("RFC4309: Invalid AAD length %d\n",
2244 req->assoclen);
2245 return -EINVAL;
2246 }
2247 }
2248 if (aeadctx->enckey_len == 0) {
2249 pr_err("CCM: Encryption key not set\n");
2250 return -EINVAL;
2251 }
2252 return 0;
2253}
2254
2255unsigned int fill_aead_req_fields(struct sk_buff *skb,
2256 struct aead_request *req,
2257 struct scatterlist *src,
2258 unsigned int ivsize,
2259 struct chcr_aead_ctx *aeadctx)
2260{
2261 unsigned int frags = 0;
2262 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2263 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2264 /* b0 and aad length (if available) */
2265
2266 write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
2267 (req->assoclen ? CCM_AAD_FIELD_SIZE : 0));
2268 if (req->assoclen) {
2269 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2270 write_sg_to_skb(skb, &frags, req->src,
2271 req->assoclen - 8);
2272 else
2273 write_sg_to_skb(skb, &frags, req->src, req->assoclen);
2274 }
2275 write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
2276 if (req->cryptlen)
2277 write_sg_to_skb(skb, &frags, src, req->cryptlen);
2278
2279 return frags;
2280}
2281
2282static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2283 unsigned short qid,
2284 int size,
2285 unsigned short op_type)
2286{
2287 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2288 struct chcr_context *ctx = crypto_aead_ctx(tfm);
2289 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2290 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2291 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2292 struct sk_buff *skb = NULL;
2293 struct chcr_wr *chcr_req;
2294 struct cpl_rx_phys_dsgl *phys_cpl;
2295 struct phys_sge_parm sg_param;
94e1dab1 2296 struct scatterlist *src;
2debd332 2297 unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
738bff48 2298 unsigned int dst_size = 0, kctx_len, nents;
2debd332
HJ
2299 unsigned int sub_type;
2300 unsigned int authsize = crypto_aead_authsize(tfm);
5fe8c711 2301 int error = -EINVAL, src_nent;
2debd332
HJ
2302 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2303 GFP_ATOMIC;
ee0863ba 2304 struct adapter *adap = padap(ctx->dev);
2debd332 2305
738bff48
HJ
2306 dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
2307 authsize);
2308 reqctx->newdstsg = NULL;
2debd332
HJ
2309 if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
2310 goto err;
0e93708d
HJ
2311 src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
2312 if (src_nent < 0)
2debd332 2313 goto err;
0e93708d 2314
2debd332 2315 sub_type = get_aead_subtype(tfm);
94e1dab1 2316 src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
2debd332 2317 if (req->src != req->dst) {
5fe8c711
HJ
2318 error = chcr_copy_assoc(req, aeadctx);
2319 if (error) {
2debd332 2320 pr_err("AAD copy to destination buffer fails\n");
5fe8c711 2321 return ERR_PTR(error);
2debd332 2322 }
738bff48
HJ
2323 }
2324 if (dst_size && is_newsg(req->dst, &nents)) {
2325 reqctx->newdstsg = alloc_new_sg(req->dst, nents);
2326 if (IS_ERR(reqctx->newdstsg))
2327 return ERR_CAST(reqctx->newdstsg);
2328 reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2329 reqctx->newdstsg, req->assoclen);
2330 } else {
2331 if (req->src == req->dst)
2332 reqctx->dst = src;
2333 else
2334 reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2335 req->dst, req->assoclen);
2debd332 2336 }
94e1dab1 2337 reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
2debd332 2338 (op_type ? -authsize : authsize));
0e93708d 2339 if (reqctx->dst_nents < 0) {
2debd332 2340 pr_err("CCM:Invalid Destination sg entries\n");
5fe8c711 2341 error = -EINVAL;
2debd332
HJ
2342 goto err;
2343 }
5fe8c711
HJ
2344 error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
2345 if (error)
2debd332
HJ
2346 goto err;
2347
2348 dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
2349 kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
2350 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
0e93708d
HJ
2351 if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
2352 T6_MAX_AAD_SIZE - 18,
2353 transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
2354 op_type)) {
ee0863ba 2355 atomic_inc(&adap->chcr_stats.fallback);
738bff48
HJ
2356 free_new_sg(reqctx->newdstsg);
2357 reqctx->newdstsg = NULL;
0e93708d
HJ
2358 return ERR_PTR(chcr_aead_fallback(req, op_type));
2359 }
2360
2debd332
HJ
2361 skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
2362
5fe8c711
HJ
2363 if (!skb) {
2364 error = -ENOMEM;
2debd332 2365 goto err;
5fe8c711 2366 }
2debd332
HJ
2367
2368 skb_reserve(skb, sizeof(struct sge_opaque_hdr));
2369
2370 chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
2371 memset(chcr_req, 0, transhdr_len);
2372
2373 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
2374
2375 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2376 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2377 memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
2378 16), aeadctx->key, aeadctx->enckey_len);
2379
2380 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
5fe8c711
HJ
2381 error = ccm_format_packet(req, aeadctx, sub_type, op_type);
2382 if (error)
2debd332
HJ
2383 goto dstmap_fail;
2384
2385 sg_param.nents = reqctx->dst_nents;
2386 sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
2387 sg_param.qid = qid;
5fe8c711
HJ
2388 error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
2389 reqctx->dst, &sg_param);
2390 if (error)
2debd332
HJ
2391 goto dstmap_fail;
2392
2393 skb_set_transport_header(skb, transhdr_len);
2394 frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
ee0863ba 2395 atomic_inc(&adap->chcr_stats.aead_rqst);
b8fd1f41 2396 create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1,
2512a624 2397 sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
2debd332
HJ
2398 reqctx->skb = skb;
2399 skb_get(skb);
2400 return skb;
2401dstmap_fail:
2402 kfree_skb(skb);
2debd332 2403err:
738bff48
HJ
2404 free_new_sg(reqctx->newdstsg);
2405 reqctx->newdstsg = NULL;
5fe8c711 2406 return ERR_PTR(error);
2debd332
HJ
2407}
2408
2409static struct sk_buff *create_gcm_wr(struct aead_request *req,
2410 unsigned short qid,
2411 int size,
2412 unsigned short op_type)
2413{
2414 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2415 struct chcr_context *ctx = crypto_aead_ctx(tfm);
2416 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2417 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2418 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2419 struct sk_buff *skb = NULL;
2420 struct chcr_wr *chcr_req;
2421 struct cpl_rx_phys_dsgl *phys_cpl;
2422 struct phys_sge_parm sg_param;
94e1dab1 2423 struct scatterlist *src;
2debd332
HJ
2424 unsigned int frags = 0, transhdr_len;
2425 unsigned int ivsize = AES_BLOCK_SIZE;
738bff48 2426 unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen;
2debd332 2427 unsigned char tag_offset = 0;
2debd332 2428 unsigned int authsize = crypto_aead_authsize(tfm);
5fe8c711 2429 int error = -EINVAL, src_nent;
2debd332
HJ
2430 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2431 GFP_ATOMIC;
ee0863ba 2432 struct adapter *adap = padap(ctx->dev);
2debd332 2433
738bff48
HJ
2434 reqctx->newdstsg = NULL;
2435 dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
2436 authsize);
2debd332
HJ
2437 /* validate key size */
2438 if (aeadctx->enckey_len == 0)
2439 goto err;
2440
2441 if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
2442 goto err;
d600fc8a 2443 src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen);
0e93708d 2444 if (src_nent < 0)
2debd332
HJ
2445 goto err;
2446
d600fc8a 2447 src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen);
2debd332 2448 if (req->src != req->dst) {
5fe8c711
HJ
2449 error = chcr_copy_assoc(req, aeadctx);
2450 if (error)
2451 return ERR_PTR(error);
2debd332
HJ
2452 }
2453
738bff48
HJ
2454 if (dst_size && is_newsg(req->dst, &nents)) {
2455 reqctx->newdstsg = alloc_new_sg(req->dst, nents);
2456 if (IS_ERR(reqctx->newdstsg))
2457 return ERR_CAST(reqctx->newdstsg);
2458 reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2459 reqctx->newdstsg, assoclen);
2460 } else {
2461 if (req->src == req->dst)
2462 reqctx->dst = src;
2463 else
2464 reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
2465 req->dst, assoclen);
2466 }
d600fc8a 2467
94e1dab1 2468 reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
2debd332 2469 (op_type ? -authsize : authsize));
0e93708d 2470 if (reqctx->dst_nents < 0) {
2debd332 2471 pr_err("GCM:Invalid Destination sg entries\n");
5fe8c711 2472 error = -EINVAL;
2debd332
HJ
2473 goto err;
2474 }
2475
2476
2477 dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
2478 kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
2479 AEAD_H_SIZE;
2480 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
0e93708d
HJ
2481 if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
2482 T6_MAX_AAD_SIZE,
2483 transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
2484 op_type)) {
ee0863ba 2485 atomic_inc(&adap->chcr_stats.fallback);
738bff48
HJ
2486 free_new_sg(reqctx->newdstsg);
2487 reqctx->newdstsg = NULL;
0e93708d
HJ
2488 return ERR_PTR(chcr_aead_fallback(req, op_type));
2489 }
2debd332 2490 skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
5fe8c711
HJ
2491 if (!skb) {
2492 error = -ENOMEM;
2debd332 2493 goto err;
5fe8c711 2494 }
2debd332
HJ
2495
2496 /* NIC driver is going to write the sge hdr. */
2497 skb_reserve(skb, sizeof(struct sge_opaque_hdr));
2498
2499 chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
2500 memset(chcr_req, 0, transhdr_len);
2501
2502 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
d600fc8a 2503 assoclen = req->assoclen - 8;
2debd332
HJ
2504
2505 tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2506 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
8a13449f 2507 ctx->dev->rx_channel_id, 2, (ivsize ?
d600fc8a 2508 (assoclen + 1) : 0));
0e93708d 2509 chcr_req->sec_cpl.pldlen =
d600fc8a 2510 htonl(assoclen + ivsize + req->cryptlen);
2debd332 2511 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
d600fc8a
HJ
2512 assoclen ? 1 : 0, assoclen,
2513 assoclen + ivsize + 1, 0);
2debd332 2514 chcr_req->sec_cpl.cipherstop_lo_authinsert =
d600fc8a 2515 FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1,
2debd332
HJ
2516 tag_offset, tag_offset);
2517 chcr_req->sec_cpl.seqno_numivs =
2518 FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
2519 CHCR_ENCRYPT_OP) ? 1 : 0,
2520 CHCR_SCMD_CIPHER_MODE_AES_GCM,
0a7bd30c
HJ
2521 CHCR_SCMD_AUTH_MODE_GHASH,
2522 aeadctx->hmac_ctrl, ivsize >> 1);
2debd332
HJ
2523 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2524 0, 1, dst_size);
2525 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2526 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2527 memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
2528 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
2529
2530 /* prepare a 16 byte iv */
2531 /* SALT | IV | 0x00000001 */
2532 if (get_aead_subtype(tfm) ==
2533 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
2534 memcpy(reqctx->iv, aeadctx->salt, 4);
2535 memcpy(reqctx->iv + 4, req->iv, 8);
2536 } else {
2537 memcpy(reqctx->iv, req->iv, 12);
2538 }
2539 *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
2540
2541 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2542 sg_param.nents = reqctx->dst_nents;
2543 sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
2544 sg_param.qid = qid;
5fe8c711
HJ
2545 error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
2546 reqctx->dst, &sg_param);
2547 if (error)
2debd332
HJ
2548 goto dstmap_fail;
2549
2550 skb_set_transport_header(skb, transhdr_len);
d600fc8a 2551 write_sg_to_skb(skb, &frags, req->src, assoclen);
2debd332 2552 write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
0e93708d 2553 write_sg_to_skb(skb, &frags, src, req->cryptlen);
ee0863ba 2554 atomic_inc(&adap->chcr_stats.aead_rqst);
b8fd1f41 2555 create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
2512a624
HJ
2556 sizeof(struct cpl_rx_phys_dsgl) + dst_size,
2557 reqctx->verify);
2debd332
HJ
2558 reqctx->skb = skb;
2559 skb_get(skb);
2560 return skb;
2561
2562dstmap_fail:
2563 /* ivmap_fail: */
2564 kfree_skb(skb);
2debd332 2565err:
738bff48
HJ
2566 free_new_sg(reqctx->newdstsg);
2567 reqctx->newdstsg = NULL;
5fe8c711 2568 return ERR_PTR(error);
2debd332
HJ
2569}
2570
2571
2572
2573static int chcr_aead_cra_init(struct crypto_aead *tfm)
2574{
2575 struct chcr_context *ctx = crypto_aead_ctx(tfm);
2576 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
0e93708d
HJ
2577 struct aead_alg *alg = crypto_aead_alg(tfm);
2578
2579 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
5fe8c711
HJ
2580 CRYPTO_ALG_NEED_FALLBACK |
2581 CRYPTO_ALG_ASYNC);
0e93708d
HJ
2582 if (IS_ERR(aeadctx->sw_cipher))
2583 return PTR_ERR(aeadctx->sw_cipher);
2584 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
2585 sizeof(struct aead_request) +
2586 crypto_aead_reqsize(aeadctx->sw_cipher)));
2debd332
HJ
2587 aeadctx->null = crypto_get_default_null_skcipher();
2588 if (IS_ERR(aeadctx->null))
2589 return PTR_ERR(aeadctx->null);
2590 return chcr_device_init(ctx);
2591}
2592
2593static void chcr_aead_cra_exit(struct crypto_aead *tfm)
2594{
0e93708d
HJ
2595 struct chcr_context *ctx = crypto_aead_ctx(tfm);
2596 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2597
2debd332 2598 crypto_put_default_null_skcipher();
0e93708d 2599 crypto_free_aead(aeadctx->sw_cipher);
2debd332
HJ
2600}
2601
2602static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
2603 unsigned int authsize)
2604{
2605 struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2606
2607 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
2608 aeadctx->mayverify = VERIFY_HW;
0e93708d 2609 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
2610}
2611static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
2612 unsigned int authsize)
2613{
2614 struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2615 u32 maxauth = crypto_aead_maxauthsize(tfm);
2616
2617 /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2 does
2618 * not hold for SHA1, so the authsize == 12 check must come before the
2619 * authsize == (maxauth >> 1) check.
2620 */
2621 if (authsize == ICV_4) {
2622 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2623 aeadctx->mayverify = VERIFY_HW;
2624 } else if (authsize == ICV_6) {
2625 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2626 aeadctx->mayverify = VERIFY_HW;
2627 } else if (authsize == ICV_10) {
2628 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2629 aeadctx->mayverify = VERIFY_HW;
2630 } else if (authsize == ICV_12) {
2631 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2632 aeadctx->mayverify = VERIFY_HW;
2633 } else if (authsize == ICV_14) {
2634 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2635 aeadctx->mayverify = VERIFY_HW;
2636 } else if (authsize == (maxauth >> 1)) {
2637 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2638 aeadctx->mayverify = VERIFY_HW;
2639 } else if (authsize == maxauth) {
2640 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2641 aeadctx->mayverify = VERIFY_HW;
2642 } else {
2643 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2644 aeadctx->mayverify = VERIFY_SW;
2645 }
0e93708d 2646 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
2647}
2648
2649
2650static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
2651{
2652 struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2653
2654 switch (authsize) {
2655 case ICV_4:
2656 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2657 aeadctx->mayverify = VERIFY_HW;
2658 break;
2659 case ICV_8:
2660 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2661 aeadctx->mayverify = VERIFY_HW;
2662 break;
2663 case ICV_12:
2664 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2665 aeadctx->mayverify = VERIFY_HW;
2666 break;
2667 case ICV_14:
2668 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2669 aeadctx->mayverify = VERIFY_HW;
2670 break;
2671 case ICV_16:
2672 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2673 aeadctx->mayverify = VERIFY_HW;
2674 break;
2675 case ICV_13:
2676 case ICV_15:
2677 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2678 aeadctx->mayverify = VERIFY_SW;
2679 break;
2680 default:
2681
2682 crypto_tfm_set_flags((struct crypto_tfm *) tfm,
2683 CRYPTO_TFM_RES_BAD_KEY_LEN);
2684 return -EINVAL;
2685 }
0e93708d 2686 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
2687}
2688
2689static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
2690 unsigned int authsize)
2691{
2692 struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2693
2694 switch (authsize) {
2695 case ICV_8:
2696 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2697 aeadctx->mayverify = VERIFY_HW;
2698 break;
2699 case ICV_12:
2700 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2701 aeadctx->mayverify = VERIFY_HW;
2702 break;
2703 case ICV_16:
2704 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2705 aeadctx->mayverify = VERIFY_HW;
2706 break;
2707 default:
2708 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2709 CRYPTO_TFM_RES_BAD_KEY_LEN);
2710 return -EINVAL;
2711 }
0e93708d 2712 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
2713}
2714
2715static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
2716 unsigned int authsize)
2717{
2718 struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2719
2720 switch (authsize) {
2721 case ICV_4:
2722 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2723 aeadctx->mayverify = VERIFY_HW;
2724 break;
2725 case ICV_6:
2726 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2727 aeadctx->mayverify = VERIFY_HW;
2728 break;
2729 case ICV_8:
2730 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2731 aeadctx->mayverify = VERIFY_HW;
2732 break;
2733 case ICV_10:
2734 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2735 aeadctx->mayverify = VERIFY_HW;
2736 break;
2737 case ICV_12:
2738 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2739 aeadctx->mayverify = VERIFY_HW;
2740 break;
2741 case ICV_14:
2742 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2743 aeadctx->mayverify = VERIFY_HW;
2744 break;
2745 case ICV_16:
2746 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2747 aeadctx->mayverify = VERIFY_HW;
2748 break;
2749 default:
2750 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2751 CRYPTO_TFM_RES_BAD_KEY_LEN);
2752 return -EINVAL;
2753 }
0e93708d 2754 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2debd332
HJ
2755}
2756
0e93708d 2757static int chcr_ccm_common_setkey(struct crypto_aead *aead,
2debd332
HJ
2758 const u8 *key,
2759 unsigned int keylen)
2760{
2761 struct chcr_context *ctx = crypto_aead_ctx(aead);
2762 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2763 unsigned char ck_size, mk_size;
2764 int key_ctx_size = 0;
2765
2debd332
HJ
2766 key_ctx_size = sizeof(struct _key_ctx) +
2767 ((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
2768 if (keylen == AES_KEYSIZE_128) {
2769 mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2770 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2771 } else if (keylen == AES_KEYSIZE_192) {
2772 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2773 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
2774 } else if (keylen == AES_KEYSIZE_256) {
2775 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2776 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2777 } else {
2778 crypto_tfm_set_flags((struct crypto_tfm *)aead,
2779 CRYPTO_TFM_RES_BAD_KEY_LEN);
2780 aeadctx->enckey_len = 0;
2781 return -EINVAL;
2782 }
2783 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
2784 key_ctx_size >> 4);
0e93708d
HJ
2785 memcpy(aeadctx->key, key, keylen);
2786 aeadctx->enckey_len = keylen;
2787
2debd332
HJ
2788 return 0;
2789}
2790
0e93708d
HJ
2791static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
2792 const u8 *key,
2793 unsigned int keylen)
2794{
2795 struct chcr_context *ctx = crypto_aead_ctx(aead);
2796 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2797 int error;
2798
2799 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2800 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
2801 CRYPTO_TFM_REQ_MASK);
2802 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2803 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2804 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2805 CRYPTO_TFM_RES_MASK);
2806 if (error)
2807 return error;
2808 return chcr_ccm_common_setkey(aead, key, keylen);
2809}
2810
2debd332
HJ
2811static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
2812 unsigned int keylen)
2813{
2814 struct chcr_context *ctx = crypto_aead_ctx(aead);
4dbeae42
HJ
2815 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2816 int error;
2debd332
HJ
2817
2818 if (keylen < 3) {
2819 crypto_tfm_set_flags((struct crypto_tfm *)aead,
2820 CRYPTO_TFM_RES_BAD_KEY_LEN);
2821 aeadctx->enckey_len = 0;
2822 return -EINVAL;
2823 }
4dbeae42
HJ
2824 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2825 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
2826 CRYPTO_TFM_REQ_MASK);
2827 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2828 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2829 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2830 CRYPTO_TFM_RES_MASK);
2831 if (error)
2832 return error;
2debd332
HJ
2833 keylen -= 3;
2834 memcpy(aeadctx->salt, key + keylen, 3);
0e93708d 2835 return chcr_ccm_common_setkey(aead, key, keylen);
2debd332
HJ
2836}
2837
2838static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
2839 unsigned int keylen)
2840{
2841 struct chcr_context *ctx = crypto_aead_ctx(aead);
2842 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2843 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
8356ea51 2844 struct crypto_cipher *cipher;
2debd332
HJ
2845 unsigned int ck_size;
2846 int ret = 0, key_ctx_size = 0;
2847
0e93708d
HJ
2848 aeadctx->enckey_len = 0;
2849 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2850 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
2851 & CRYPTO_TFM_REQ_MASK);
2852 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2853 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2854 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2855 CRYPTO_TFM_RES_MASK);
2856 if (ret)
2857 goto out;
2858
7c2cf1c4
HJ
2859 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
2860 keylen > 3) {
2debd332
HJ
2861 keylen -= 4; /* nonce/salt is present in the last 4 bytes */
2862 memcpy(aeadctx->salt, key + keylen, 4);
2863 }
2864 if (keylen == AES_KEYSIZE_128) {
2865 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2866 } else if (keylen == AES_KEYSIZE_192) {
2867 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2868 } else if (keylen == AES_KEYSIZE_256) {
2869 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2870 } else {
2871 crypto_tfm_set_flags((struct crypto_tfm *)aead,
2872 CRYPTO_TFM_RES_BAD_KEY_LEN);
0e93708d 2873 pr_err("GCM: Invalid key length %d\n", keylen);
2debd332
HJ
2874 ret = -EINVAL;
2875 goto out;
2876 }
2877
2878 memcpy(aeadctx->key, key, keylen);
2879 aeadctx->enckey_len = keylen;
2880 key_ctx_size = sizeof(struct _key_ctx) +
2881 ((DIV_ROUND_UP(keylen, 16)) << 4) +
2882 AEAD_H_SIZE;
2883 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
2884 CHCR_KEYCTX_MAC_KEY_SIZE_128,
2885 0, 0,
2886 key_ctx_size >> 4);
8356ea51
HJ
2887 /* Calculate the H = CIPH(K, 0 repeated 16 times).
2888 * It will go in the key context.
2debd332 2889 */
8356ea51
HJ
2890 cipher = crypto_alloc_cipher("aes-generic", 0, 0);
2891 if (IS_ERR(cipher)) {
2debd332
HJ
2892 aeadctx->enckey_len = 0;
2893 ret = -ENOMEM;
2894 goto out;
2895 }
8356ea51
HJ
2896
2897 ret = crypto_cipher_setkey(cipher, key, keylen);
2debd332
HJ
2898 if (ret) {
2899 aeadctx->enckey_len = 0;
2900 goto out1;
2901 }
2902 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
8356ea51 2903 crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
2debd332
HJ
2904
2905out1:
8356ea51 2906 crypto_free_cipher(cipher);
2debd332
HJ
2907out:
2908 return ret;
2909}
2910
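/*
 * Note (illustrative): the GHASH hash subkey loaded into the key context is
 * H = AES_K(0^128), computed once at setkey time with a software AES cipher
 * and stored right after the raw key.
 */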
2911static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
2912 unsigned int keylen)
2913{
2914 struct chcr_context *ctx = crypto_aead_ctx(authenc);
2915 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2916 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2917 /* the key blob contains both the auth and cipher keys */
2918 struct crypto_authenc_keys keys;
2919 unsigned int bs;
2920 unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
2921 int err = 0, i, key_ctx_len = 0;
2922 unsigned char ck_size = 0;
2923 unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
ec1bca94 2924 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
2debd332
HJ
2925 struct algo_param param;
2926 int align;
2927 u8 *o_ptr = NULL;
2928
0e93708d
HJ
2929 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2930 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
2931 & CRYPTO_TFM_REQ_MASK);
2932 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2933 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
2934 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
2935 & CRYPTO_TFM_RES_MASK);
2936 if (err)
2937 goto out;
2938
2debd332
HJ
2939 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2940 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2941 goto out;
2942 }
2943
2944 if (get_alg_config(&param, max_authsize)) {
2945 pr_err("chcr : Unsupported digest size\n");
2946 goto out;
2947 }
2948 if (keys.enckeylen == AES_KEYSIZE_128) {
2949 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2950 } else if (keys.enckeylen == AES_KEYSIZE_192) {
2951 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2952 } else if (keys.enckeylen == AES_KEYSIZE_256) {
2953 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2954 } else {
2955 pr_err("chcr : Unsupported cipher key\n");
2956 goto out;
2957 }
2958
2959 /* Copy only the encryption key. We use the authkey to generate h(ipad)
2960 * and h(opad), so the authkey is not needed again. authkeylen equals
2961 * the hash digest size.
2962 */
2963 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
2964 aeadctx->enckey_len = keys.enckeylen;
2965 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
2966 aeadctx->enckey_len << 3);
2967
2968 base_hash = chcr_alloc_shash(max_authsize);
2969 if (IS_ERR(base_hash)) {
2970 pr_err("chcr : Base driver cannot be loaded\n");
0e93708d
HJ
2971 aeadctx->enckey_len = 0;
2972 return -EINVAL;
324429d7 2973 }
2debd332
HJ
2974 {
2975 SHASH_DESC_ON_STACK(shash, base_hash);
2976 shash->tfm = base_hash;
2977 shash->flags = crypto_shash_get_flags(base_hash);
2978 bs = crypto_shash_blocksize(base_hash);
2979 align = KEYCTX_ALIGN_PAD(max_authsize);
2980 o_ptr = actx->h_iopad + param.result_size + align;
2981
2982 if (keys.authkeylen > bs) {
2983 err = crypto_shash_digest(shash, keys.authkey,
2984 keys.authkeylen,
2985 o_ptr);
2986 if (err) {
2987 pr_err("chcr : Base driver cannot be loaded\n");
2988 goto out;
2989 }
2990 keys.authkeylen = max_authsize;
2991 } else
2992 memcpy(o_ptr, keys.authkey, keys.authkeylen);
2993
2994 /* Compute the ipad-digest*/
2995 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
2996 memcpy(pad, o_ptr, keys.authkeylen);
2997 for (i = 0; i < bs >> 2; i++)
2998 *((unsigned int *)pad + i) ^= IPAD_DATA;
2999
3000 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3001 max_authsize))
3002 goto out;
3003 /* Compute the opad-digest */
3004 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3005 memcpy(pad, o_ptr, keys.authkeylen);
3006 for (i = 0; i < bs >> 2; i++)
3007 *((unsigned int *)pad + i) ^= OPAD_DATA;
3008
3009 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3010 goto out;
3011
3012 /* convert the ipad and opad digest to network order */
3013 chcr_change_order(actx->h_iopad, param.result_size);
3014 chcr_change_order(o_ptr, param.result_size);
3015 key_ctx_len = sizeof(struct _key_ctx) +
3016 ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
3017 (param.result_size + align) * 2;
3018 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3019 0, 1, key_ctx_len >> 4);
3020 actx->auth_mode = param.auth_mode;
3021 chcr_free_shash(base_hash);
3022
3023 return 0;
3024 }
3025out:
3026 aeadctx->enckey_len = 0;
ec1bca94 3027 if (!IS_ERR(base_hash))
2debd332
HJ
3028 chcr_free_shash(base_hash);
3029 return -EINVAL;
324429d7
HS
3030}
3031
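/*
 * Note (illustrative): crypto_authenc_extractkeys() above splits the single
 * setkey() blob into its authentication and encryption halves; only the
 * cipher key is kept verbatim, while the auth key is folded into the
 * precomputed h(ipad)/h(opad) digests held in actx->h_iopad.
 */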
2debd332
HJ
3032static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3033 const u8 *key, unsigned int keylen)
3034{
3035 struct chcr_context *ctx = crypto_aead_ctx(authenc);
3036 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3037 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3038 struct crypto_authenc_keys keys;
0e93708d 3039 int err;
2debd332
HJ
3040 /* the key blob contains both the auth and cipher keys */
3041 int key_ctx_len = 0;
3042 unsigned char ck_size = 0;
3043
0e93708d
HJ
3044 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3045 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3046 & CRYPTO_TFM_REQ_MASK);
3047 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3048 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3049 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3050 & CRYPTO_TFM_RES_MASK);
3051 if (err)
3052 goto out;
3053
2debd332
HJ
3054 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3055 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3056 goto out;
3057 }
3058 if (keys.enckeylen == AES_KEYSIZE_128) {
3059 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3060 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3061 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3062 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3063 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3064 } else {
3065 pr_err("chcr : Unsupported cipher key\n");
3066 goto out;
3067 }
3068 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3069 aeadctx->enckey_len = keys.enckeylen;
3070 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3071 aeadctx->enckey_len << 3);
3072 key_ctx_len = sizeof(struct _key_ctx)
3073 + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
3074
3075 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3076 0, key_ctx_len >> 4);
3077 actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3078 return 0;
3079out:
3080 aeadctx->enckey_len = 0;
3081 return -EINVAL;
3082}
3083static int chcr_aead_encrypt(struct aead_request *req)
3084{
3085 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3086 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3087
3088 reqctx->verify = VERIFY_HW;
3089
3090 switch (get_aead_subtype(tfm)) {
3091 case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
3092 case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
3093 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3094 create_authenc_wr);
3095 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3096 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3097 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3098 create_aead_ccm_wr);
3099 default:
3100 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3101 create_gcm_wr);
3102 }
3103}
3104
3105static int chcr_aead_decrypt(struct aead_request *req)
3106{
3107 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3108 struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
3109 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3110 int size;
3111
3112 if (aeadctx->mayverify == VERIFY_SW) {
3113 size = crypto_aead_maxauthsize(tfm);
3114 reqctx->verify = VERIFY_SW;
3115 } else {
3116 size = 0;
3117 reqctx->verify = VERIFY_HW;
3118 }
3119
3120 switch (get_aead_subtype(tfm)) {
3121 case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
3122 case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
3123 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3124 create_authenc_wr);
3125 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3126 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3127 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3128 create_aead_ccm_wr);
3129 default:
3130 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3131 create_gcm_wr);
3132 }
3133}
3134
3135static int chcr_aead_op(struct aead_request *req,
3136 unsigned short op_type,
3137 int size,
3138 create_wr_t create_wr_fn)
3139{
3140 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3141 struct chcr_context *ctx = crypto_aead_ctx(tfm);
5ba042c0 3142 struct uld_ctx *u_ctx;
2debd332
HJ
3143 struct sk_buff *skb;
3144
5ba042c0 3145 if (!ctx->dev) {
2debd332
HJ
3146 pr_err("chcr : %s : No crypto device.\n", __func__);
3147 return -ENXIO;
3148 }
5ba042c0 3149 u_ctx = ULD_CTX(ctx);
2debd332 3150 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
72a56ca9 3151 ctx->tx_qidx)) {
2debd332
HJ
3152 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3153 return -EBUSY;
3154 }
3155
3156 /* Form a WR from req */
72a56ca9 3157 skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
2debd332
HJ
3158 op_type);
3159
0e93708d 3160 if (IS_ERR(skb) || !skb)
2debd332 3161 return PTR_ERR(skb);
2debd332
HJ
3162
3163 skb->dev = u_ctx->lldi.ports[0];
72a56ca9 3164 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
2debd332
HJ
3165 chcr_send_wr(skb);
3166 return -EINPROGRESS;
3167}
324429d7
HS
3168static struct chcr_alg_template driver_algs[] = {
3169 /* AES-CBC */
3170 {
b8fd1f41 3171 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
324429d7
HS
3172 .is_registered = 0,
3173 .alg.crypto = {
3174 .cra_name = "cbc(aes)",
2debd332 3175 .cra_driver_name = "cbc-aes-chcr",
324429d7 3176 .cra_blocksize = AES_BLOCK_SIZE,
324429d7 3177 .cra_init = chcr_cra_init,
b8fd1f41 3178 .cra_exit = chcr_cra_exit,
324429d7
HS
3179 .cra_u.ablkcipher = {
3180 .min_keysize = AES_MIN_KEY_SIZE,
3181 .max_keysize = AES_MAX_KEY_SIZE,
3182 .ivsize = AES_BLOCK_SIZE,
3183 .setkey = chcr_aes_cbc_setkey,
3184 .encrypt = chcr_aes_encrypt,
3185 .decrypt = chcr_aes_decrypt,
3186 }
3187 }
3188 },
3189 {
b8fd1f41 3190 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
324429d7
HS
3191 .is_registered = 0,
3192 .alg.crypto = {
3193 .cra_name = "xts(aes)",
2debd332 3194 .cra_driver_name = "xts-aes-chcr",
324429d7 3195 .cra_blocksize = AES_BLOCK_SIZE,
324429d7
HS
3196 .cra_init = chcr_cra_init,
3197 .cra_exit = NULL,
b8fd1f41 3198 .cra_u .ablkcipher = {
324429d7
HS
3199 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3200 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3201 .ivsize = AES_BLOCK_SIZE,
3202 .setkey = chcr_aes_xts_setkey,
3203 .encrypt = chcr_aes_encrypt,
3204 .decrypt = chcr_aes_decrypt,
3205 }
3206 }
b8fd1f41
HJ
3207 },
3208 {
3209 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3210 .is_registered = 0,
3211 .alg.crypto = {
3212 .cra_name = "ctr(aes)",
3213 .cra_driver_name = "ctr-aes-chcr",
3214 .cra_blocksize = 1,
3215 .cra_init = chcr_cra_init,
3216 .cra_exit = chcr_cra_exit,
3217 .cra_u.ablkcipher = {
3218 .min_keysize = AES_MIN_KEY_SIZE,
3219 .max_keysize = AES_MAX_KEY_SIZE,
3220 .ivsize = AES_BLOCK_SIZE,
3221 .setkey = chcr_aes_ctr_setkey,
3222 .encrypt = chcr_aes_encrypt,
3223 .decrypt = chcr_aes_decrypt,
3224 }
3225 }
3226 },
3227 {
3228 .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3229 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3230 .is_registered = 0,
3231 .alg.crypto = {
3232 .cra_name = "rfc3686(ctr(aes))",
3233 .cra_driver_name = "rfc3686-ctr-aes-chcr",
3234 .cra_blocksize = 1,
3235 .cra_init = chcr_rfc3686_init,
3236 .cra_exit = chcr_cra_exit,
3237 .cra_u.ablkcipher = {
3238 .min_keysize = AES_MIN_KEY_SIZE +
3239 CTR_RFC3686_NONCE_SIZE,
3240 .max_keysize = AES_MAX_KEY_SIZE +
3241 CTR_RFC3686_NONCE_SIZE,
3242 .ivsize = CTR_RFC3686_IV_SIZE,
3243 .setkey = chcr_aes_rfc3686_setkey,
3244 .encrypt = chcr_aes_encrypt,
3245 .decrypt = chcr_aes_decrypt,
3246 .geniv = "seqiv",
3247 }
324429d7
HS
3248 }
3249 },
    /* SHA */
    {
        .type = CRYPTO_ALG_TYPE_AHASH,
        .is_registered = 0,
        .alg.hash = {
            .halg.digestsize = SHA1_DIGEST_SIZE,
            .halg.base = {
                .cra_name = "sha1",
                .cra_driver_name = "sha1-chcr",
                .cra_blocksize = SHA1_BLOCK_SIZE,
            }
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_AHASH,
        .is_registered = 0,
        .alg.hash = {
            .halg.digestsize = SHA256_DIGEST_SIZE,
            .halg.base = {
                .cra_name = "sha256",
                .cra_driver_name = "sha256-chcr",
                .cra_blocksize = SHA256_BLOCK_SIZE,
            }
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_AHASH,
        .is_registered = 0,
        .alg.hash = {
            .halg.digestsize = SHA224_DIGEST_SIZE,
            .halg.base = {
                .cra_name = "sha224",
                .cra_driver_name = "sha224-chcr",
                .cra_blocksize = SHA224_BLOCK_SIZE,
            }
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_AHASH,
        .is_registered = 0,
        .alg.hash = {
            .halg.digestsize = SHA384_DIGEST_SIZE,
            .halg.base = {
                .cra_name = "sha384",
                .cra_driver_name = "sha384-chcr",
                .cra_blocksize = SHA384_BLOCK_SIZE,
            }
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_AHASH,
        .is_registered = 0,
        .alg.hash = {
            .halg.digestsize = SHA512_DIGEST_SIZE,
            .halg.base = {
                .cra_name = "sha512",
                .cra_driver_name = "sha512-chcr",
                .cra_blocksize = SHA512_BLOCK_SIZE,
            }
        }
    },
    /* HMAC */
    {
        .type = CRYPTO_ALG_TYPE_HMAC,
        .is_registered = 0,
        .alg.hash = {
            .halg.digestsize = SHA1_DIGEST_SIZE,
            .halg.base = {
                .cra_name = "hmac(sha1)",
                .cra_driver_name = "hmac-sha1-chcr",
                .cra_blocksize = SHA1_BLOCK_SIZE,
            }
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_HMAC,
        .is_registered = 0,
        .alg.hash = {
            .halg.digestsize = SHA224_DIGEST_SIZE,
            .halg.base = {
                .cra_name = "hmac(sha224)",
                .cra_driver_name = "hmac-sha224-chcr",
                .cra_blocksize = SHA224_BLOCK_SIZE,
            }
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_HMAC,
        .is_registered = 0,
        .alg.hash = {
            .halg.digestsize = SHA256_DIGEST_SIZE,
            .halg.base = {
                .cra_name = "hmac(sha256)",
                .cra_driver_name = "hmac-sha256-chcr",
                .cra_blocksize = SHA256_BLOCK_SIZE,
            }
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_HMAC,
        .is_registered = 0,
        .alg.hash = {
            .halg.digestsize = SHA384_DIGEST_SIZE,
            .halg.base = {
                .cra_name = "hmac(sha384)",
                .cra_driver_name = "hmac-sha384-chcr",
                .cra_blocksize = SHA384_BLOCK_SIZE,
            }
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_HMAC,
        .is_registered = 0,
        .alg.hash = {
            .halg.digestsize = SHA512_DIGEST_SIZE,
            .halg.base = {
                .cra_name = "hmac(sha512)",
                .cra_driver_name = "hmac-sha512-chcr",
                .cra_blocksize = SHA512_BLOCK_SIZE,
            }
        }
    },
    /* Add AEAD Algorithms */
    {
        .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
        .is_registered = 0,
        .alg.aead = {
            .base = {
                .cra_name = "gcm(aes)",
                .cra_driver_name = "gcm-aes-chcr",
                .cra_blocksize = 1,
                .cra_priority = CHCR_AEAD_PRIORITY,
                .cra_ctxsize = sizeof(struct chcr_context) +
                    sizeof(struct chcr_aead_ctx) +
                    sizeof(struct chcr_gcm_ctx),
            },
            .ivsize = 12,
            .maxauthsize = GHASH_DIGEST_SIZE,
            .setkey = chcr_gcm_setkey,
            .setauthsize = chcr_gcm_setauthsize,
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
        .is_registered = 0,
        .alg.aead = {
            .base = {
                .cra_name = "rfc4106(gcm(aes))",
                .cra_driver_name = "rfc4106-gcm-aes-chcr",
                .cra_blocksize = 1,
                .cra_priority = CHCR_AEAD_PRIORITY + 1,
                .cra_ctxsize = sizeof(struct chcr_context) +
                    sizeof(struct chcr_aead_ctx) +
                    sizeof(struct chcr_gcm_ctx),
            },
            .ivsize = 8,
            .maxauthsize = GHASH_DIGEST_SIZE,
            .setkey = chcr_gcm_setkey,
            .setauthsize = chcr_4106_4309_setauthsize,
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
        .is_registered = 0,
        .alg.aead = {
            .base = {
                .cra_name = "ccm(aes)",
                .cra_driver_name = "ccm-aes-chcr",
                .cra_blocksize = 1,
                .cra_priority = CHCR_AEAD_PRIORITY,
                .cra_ctxsize = sizeof(struct chcr_context) +
                    sizeof(struct chcr_aead_ctx),
            },
            .ivsize = AES_BLOCK_SIZE,
            .maxauthsize = GHASH_DIGEST_SIZE,
            .setkey = chcr_aead_ccm_setkey,
            .setauthsize = chcr_ccm_setauthsize,
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
        .is_registered = 0,
        .alg.aead = {
            .base = {
                .cra_name = "rfc4309(ccm(aes))",
                .cra_driver_name = "rfc4309-ccm-aes-chcr",
                .cra_blocksize = 1,
                .cra_priority = CHCR_AEAD_PRIORITY + 1,
                .cra_ctxsize = sizeof(struct chcr_context) +
                    sizeof(struct chcr_aead_ctx),
            },
            .ivsize = 8,
            .maxauthsize = GHASH_DIGEST_SIZE,
            .setkey = chcr_aead_rfc4309_setkey,
            .setauthsize = chcr_4106_4309_setauthsize,
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
        .is_registered = 0,
        .alg.aead = {
            .base = {
                .cra_name = "authenc(hmac(sha1),cbc(aes))",
                .cra_driver_name =
                    "authenc-hmac-sha1-cbc-aes-chcr",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_priority = CHCR_AEAD_PRIORITY,
                .cra_ctxsize = sizeof(struct chcr_context) +
                    sizeof(struct chcr_aead_ctx) +
                    sizeof(struct chcr_authenc_ctx),
            },
            .ivsize = AES_BLOCK_SIZE,
            .maxauthsize = SHA1_DIGEST_SIZE,
            .setkey = chcr_authenc_setkey,
            .setauthsize = chcr_authenc_setauthsize,
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
        .is_registered = 0,
        .alg.aead = {
            .base = {
                .cra_name = "authenc(hmac(sha256),cbc(aes))",
                .cra_driver_name =
                    "authenc-hmac-sha256-cbc-aes-chcr",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_priority = CHCR_AEAD_PRIORITY,
                .cra_ctxsize = sizeof(struct chcr_context) +
                    sizeof(struct chcr_aead_ctx) +
                    sizeof(struct chcr_authenc_ctx),
            },
            .ivsize = AES_BLOCK_SIZE,
            .maxauthsize = SHA256_DIGEST_SIZE,
            .setkey = chcr_authenc_setkey,
            .setauthsize = chcr_authenc_setauthsize,
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
        .is_registered = 0,
        .alg.aead = {
            .base = {
                .cra_name = "authenc(hmac(sha224),cbc(aes))",
                .cra_driver_name =
                    "authenc-hmac-sha224-cbc-aes-chcr",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_priority = CHCR_AEAD_PRIORITY,
                .cra_ctxsize = sizeof(struct chcr_context) +
                    sizeof(struct chcr_aead_ctx) +
                    sizeof(struct chcr_authenc_ctx),
            },
            .ivsize = AES_BLOCK_SIZE,
            .maxauthsize = SHA224_DIGEST_SIZE,
            .setkey = chcr_authenc_setkey,
            .setauthsize = chcr_authenc_setauthsize,
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
        .is_registered = 0,
        .alg.aead = {
            .base = {
                .cra_name = "authenc(hmac(sha384),cbc(aes))",
                .cra_driver_name =
                    "authenc-hmac-sha384-cbc-aes-chcr",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_priority = CHCR_AEAD_PRIORITY,
                .cra_ctxsize = sizeof(struct chcr_context) +
                    sizeof(struct chcr_aead_ctx) +
                    sizeof(struct chcr_authenc_ctx),
            },
            .ivsize = AES_BLOCK_SIZE,
            .maxauthsize = SHA384_DIGEST_SIZE,
            .setkey = chcr_authenc_setkey,
            .setauthsize = chcr_authenc_setauthsize,
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
        .is_registered = 0,
        .alg.aead = {
            .base = {
                .cra_name = "authenc(hmac(sha512),cbc(aes))",
                .cra_driver_name =
                    "authenc-hmac-sha512-cbc-aes-chcr",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_priority = CHCR_AEAD_PRIORITY,
                .cra_ctxsize = sizeof(struct chcr_context) +
                    sizeof(struct chcr_aead_ctx) +
                    sizeof(struct chcr_authenc_ctx),
            },
            .ivsize = AES_BLOCK_SIZE,
            .maxauthsize = SHA512_DIGEST_SIZE,
            .setkey = chcr_authenc_setkey,
            .setauthsize = chcr_authenc_setauthsize,
        }
    },
    {
        .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
        .is_registered = 0,
        .alg.aead = {
            .base = {
                .cra_name = "authenc(digest_null,cbc(aes))",
                .cra_driver_name =
                    "authenc-digest_null-cbc-aes-chcr",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_priority = CHCR_AEAD_PRIORITY,
                .cra_ctxsize = sizeof(struct chcr_context) +
                    sizeof(struct chcr_aead_ctx) +
                    sizeof(struct chcr_authenc_ctx),
            },
            .ivsize = AES_BLOCK_SIZE,
            .maxauthsize = 0,
            .setkey = chcr_aead_digest_null_setkey,
            .setauthsize = chcr_authenc_null_setauthsize,
        }
    },
};
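
/*
 * Illustrative only, not part of this driver: once the templates above have
 * been registered, any kernel user of the generic crypto API can reach the
 * Chelsio implementations by algorithm name; if CHCR_CRA_PRIORITY is the
 * highest priority on the system, a plain "cbc(aes)" request resolves to
 * "cbc-aes-chcr". A minimal sketch, assuming a hypothetical caller in
 * process context and that the existing <crypto/internal/skcipher.h>
 * include provides the skcipher API. The function name is made up for the
 * example.
 */
#if 0	/* example only, not compiled with the driver */
static int chcr_example_alloc_cbc(void)
{
    struct crypto_skcipher *tfm;

    /* The crypto core picks the highest-priority "cbc(aes)" provider. */
    tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    pr_info("cbc(aes) backed by %s\n",
            crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)));

    crypto_free_skcipher(tfm);
    return 0;
}
#endif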

/*
 * chcr_unregister_alg - Deregister crypto algorithms with
 * the kernel framework.
 */
static int chcr_unregister_alg(void)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
        switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_ABLKCIPHER:
            if (driver_algs[i].is_registered)
                crypto_unregister_alg(
                        &driver_algs[i].alg.crypto);
            break;
        case CRYPTO_ALG_TYPE_AEAD:
            if (driver_algs[i].is_registered)
                crypto_unregister_aead(
                        &driver_algs[i].alg.aead);
            break;
        case CRYPTO_ALG_TYPE_AHASH:
            if (driver_algs[i].is_registered)
                crypto_unregister_ahash(
                        &driver_algs[i].alg.hash);
            break;
        }
        driver_algs[i].is_registered = 0;
    }
    return 0;
}

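/*
 * Context sizes plugged into the ahash entries of driver_algs at
 * registration time: plain hashes only need the shared chcr_context, HMAC
 * transforms additionally carry a hmac_ctx, and the per-request context
 * size doubles as the export/import state size (halg.statesize) below.
 */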
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)

/*
 * chcr_register_alg - Register crypto algorithms with the kernel framework.
 */
static int chcr_register_alg(void)
{
    struct crypto_alg ai;
    struct ahash_alg *a_hash;
    int err = 0, i;
    char *name = NULL;

    for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
        if (driver_algs[i].is_registered)
            continue;
        switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_ABLKCIPHER:
            driver_algs[i].alg.crypto.cra_priority =
                CHCR_CRA_PRIORITY;
            driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
            driver_algs[i].alg.crypto.cra_flags =
                CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
                CRYPTO_ALG_NEED_FALLBACK;
            driver_algs[i].alg.crypto.cra_ctxsize =
                sizeof(struct chcr_context) +
                sizeof(struct ablk_ctx);
            driver_algs[i].alg.crypto.cra_alignmask = 0;
            driver_algs[i].alg.crypto.cra_type =
                &crypto_ablkcipher_type;
            err = crypto_register_alg(&driver_algs[i].alg.crypto);
            name = driver_algs[i].alg.crypto.cra_driver_name;
            break;
        case CRYPTO_ALG_TYPE_AEAD:
            driver_algs[i].alg.aead.base.cra_flags =
                CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
                CRYPTO_ALG_NEED_FALLBACK;
            driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
            driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
            driver_algs[i].alg.aead.init = chcr_aead_cra_init;
            driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
            driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
            err = crypto_register_aead(&driver_algs[i].alg.aead);
            name = driver_algs[i].alg.aead.base.cra_driver_name;
            break;
        case CRYPTO_ALG_TYPE_AHASH:
            a_hash = &driver_algs[i].alg.hash;
            a_hash->update = chcr_ahash_update;
            a_hash->final = chcr_ahash_final;
            a_hash->finup = chcr_ahash_finup;
            a_hash->digest = chcr_ahash_digest;
            a_hash->export = chcr_ahash_export;
            a_hash->import = chcr_ahash_import;
            a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
            a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
            a_hash->halg.base.cra_module = THIS_MODULE;
            a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
            a_hash->halg.base.cra_alignmask = 0;
            a_hash->halg.base.cra_exit = NULL;
            a_hash->halg.base.cra_type = &crypto_ahash_type;

            if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
                a_hash->halg.base.cra_init = chcr_hmac_cra_init;
                a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
                a_hash->init = chcr_hmac_init;
                a_hash->setkey = chcr_ahash_setkey;
                a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
            } else {
                a_hash->init = chcr_sha_init;
                a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
                a_hash->halg.base.cra_init = chcr_sha_cra_init;
            }
            err = crypto_register_ahash(&driver_algs[i].alg.hash);
            ai = driver_algs[i].alg.hash.halg.base;
            name = ai.cra_driver_name;
            break;
        }
        if (err) {
            pr_err("chcr : %s : Algorithm registration failed\n",
                   name);
            goto register_err;
        } else {
            driver_algs[i].is_registered = 1;
        }
    }
    return 0;

register_err:
    chcr_unregister_alg();
    return err;
}

/*
 * start_crypto - Register the crypto algorithms.
 * This should be called once when the first device comes up. After this the
 * kernel will start calling driver APIs for crypto operations.
 */
int start_crypto(void)
{
    return chcr_register_alg();
}

/*
 * stop_crypto - Deregister all the crypto algorithms with the kernel.
 * This should be called once when the last device goes down. After this the
 * kernel will not call the driver API for crypto operations.
 */
int stop_crypto(void)
{
    chcr_unregister_alg();
    return 0;
}
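
/*
 * Illustrative only: start_crypto()/stop_crypto() are meant to be paired by
 * whatever code tracks device arrival and removal (outside this file). A
 * minimal sketch of that pairing, using a hypothetical reference count and
 * assuming <linux/atomic.h> is available; the names are made up for the
 * example.
 */
#if 0	/* example only, not compiled with the driver */
static atomic_t chcr_example_dev_count = ATOMIC_INIT(0);

static int chcr_example_device_up(void)
{
    /* Register the algorithms only when the first device shows up. */
    if (atomic_inc_return(&chcr_example_dev_count) == 1)
        return start_crypto();
    return 0;
}

static void chcr_example_device_down(void)
{
    /* Deregister once the last device has gone away. */
    if (atomic_dec_and_test(&chcr_example_dev_count))
        stop_crypto();
}
#endif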